// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include <linux/kernel.h>
#include <linux/string.h>

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};

/*
 * Replaces all occurrences of the character used with the:
 *
 *   -t, --field-separator
 *
 * option. That option uses a special separator character and does not pad
 * with spaces, so every occurrence of the separator in symbol names (and
 * other output) is replaced with a '.' character, guaranteeing that the
 * separator only ever appears between fields. For example, with "-t ," a
 * symbol named "foo,bar" is printed as "foo.bar".
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
62 { 63 int n; 64 va_list ap; 65 66 va_start(ap, fmt); 67 n = vsnprintf(bf, size, fmt, ap); 68 if (symbol_conf.field_sep && n > 0) { 69 char *sep = bf; 70 71 while (1) { 72 sep = strchr(sep, *symbol_conf.field_sep); 73 if (sep == NULL) 74 break; 75 *sep = '.'; 76 } 77 } 78 va_end(ap); 79 80 if (n >= (int)size) 81 return size - 1; 82 return n; 83 } 84 85 static int64_t cmp_null(const void *l, const void *r) 86 { 87 if (!l && !r) 88 return 0; 89 else if (!l) 90 return -1; 91 else 92 return 1; 93 } 94 95 /* --sort pid */ 96 97 static int64_t 98 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 99 { 100 return right->thread->tid - left->thread->tid; 101 } 102 103 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 104 size_t size, unsigned int width) 105 { 106 const char *comm = thread__comm_str(he->thread); 107 108 width = max(7U, width) - 8; 109 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 110 width, width, comm ?: ""); 111 } 112 113 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 114 { 115 const struct thread *th = arg; 116 117 if (type != HIST_FILTER__THREAD) 118 return -1; 119 120 return th && he->thread != th; 121 } 122 123 struct sort_entry sort_thread = { 124 .se_header = " Pid:Command", 125 .se_cmp = sort__thread_cmp, 126 .se_snprintf = hist_entry__thread_snprintf, 127 .se_filter = hist_entry__thread_filter, 128 .se_width_idx = HISTC_THREAD, 129 }; 130 131 /* --sort comm */ 132 133 /* 134 * We can't use pointer comparison in functions below, 135 * because it gives different results based on pointer 136 * values, which could break some sorting assumptions. 137 */ 138 static int64_t 139 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 140 { 141 return strcmp(comm__str(right->comm), comm__str(left->comm)); 142 } 143 144 static int64_t 145 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 146 { 147 return strcmp(comm__str(right->comm), comm__str(left->comm)); 148 } 149 150 static int64_t 151 sort__comm_sort(struct hist_entry *left, struct hist_entry *right) 152 { 153 return strcmp(comm__str(right->comm), comm__str(left->comm)); 154 } 155 156 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 157 size_t size, unsigned int width) 158 { 159 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 160 } 161 162 struct sort_entry sort_comm = { 163 .se_header = "Command", 164 .se_cmp = sort__comm_cmp, 165 .se_collapse = sort__comm_collapse, 166 .se_sort = sort__comm_sort, 167 .se_snprintf = hist_entry__comm_snprintf, 168 .se_filter = hist_entry__thread_filter, 169 .se_width_idx = HISTC_COMM, 170 }; 171 172 /* --sort dso */ 173 174 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 175 { 176 struct dso *dso_l = map_l ? map_l->dso : NULL; 177 struct dso *dso_r = map_r ? 
map_r->dso : NULL; 178 const char *dso_name_l, *dso_name_r; 179 180 if (!dso_l || !dso_r) 181 return cmp_null(dso_r, dso_l); 182 183 if (verbose > 0) { 184 dso_name_l = dso_l->long_name; 185 dso_name_r = dso_r->long_name; 186 } else { 187 dso_name_l = dso_l->short_name; 188 dso_name_r = dso_r->short_name; 189 } 190 191 return strcmp(dso_name_l, dso_name_r); 192 } 193 194 static int64_t 195 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 196 { 197 return _sort__dso_cmp(right->ms.map, left->ms.map); 198 } 199 200 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 201 size_t size, unsigned int width) 202 { 203 if (map && map->dso) { 204 const char *dso_name = verbose > 0 ? map->dso->long_name : 205 map->dso->short_name; 206 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 207 } 208 209 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 210 } 211 212 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 213 size_t size, unsigned int width) 214 { 215 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 216 } 217 218 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 219 { 220 const struct dso *dso = arg; 221 222 if (type != HIST_FILTER__DSO) 223 return -1; 224 225 return dso && (!he->ms.map || he->ms.map->dso != dso); 226 } 227 228 struct sort_entry sort_dso = { 229 .se_header = "Shared Object", 230 .se_cmp = sort__dso_cmp, 231 .se_snprintf = hist_entry__dso_snprintf, 232 .se_filter = hist_entry__dso_filter, 233 .se_width_idx = HISTC_DSO, 234 }; 235 236 /* --sort symbol */ 237 238 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 239 { 240 return (int64_t)(right_ip - left_ip); 241 } 242 243 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 244 { 245 if (!sym_l || !sym_r) 246 return cmp_null(sym_l, sym_r); 247 248 if (sym_l == sym_r) 249 return 0; 250 251 if (sym_l->inlined || sym_r->inlined) { 252 int ret = strcmp(sym_l->name, sym_r->name); 253 254 if (ret) 255 return ret; 256 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start)) 257 return 0; 258 } 259 260 if (sym_l->start != sym_r->start) 261 return (int64_t)(sym_r->start - sym_l->start); 262 263 return (int64_t)(sym_r->end - sym_l->end); 264 } 265 266 static int64_t 267 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 268 { 269 int64_t ret; 270 271 if (!left->ms.sym && !right->ms.sym) 272 return _sort__addr_cmp(left->ip, right->ip); 273 274 /* 275 * comparing symbol address alone is not enough since it's a 276 * relative address within a dso. 277 */ 278 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 279 ret = sort__dso_cmp(left, right); 280 if (ret != 0) 281 return ret; 282 } 283 284 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 285 } 286 287 static int64_t 288 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 289 { 290 if (!left->ms.sym || !right->ms.sym) 291 return cmp_null(left->ms.sym, right->ms.sym); 292 293 return strcmp(right->ms.sym->name, left->ms.sym->name); 294 } 295 296 static int _hist_entry__sym_snprintf(struct map_symbol *ms, 297 u64 ip, char level, char *bf, size_t size, 298 unsigned int width) 299 { 300 struct symbol *sym = ms->sym; 301 struct map *map = ms->map; 302 size_t ret = 0; 303 304 if (verbose > 0) { 305 char o = map ? 
dso__symtab_origin(map->dso) : '!'; 306 u64 rip = ip; 307 308 if (map && map->dso && map->dso->kernel 309 && map->dso->adjust_symbols) 310 rip = map->unmap_ip(map, ip); 311 312 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 313 BITS_PER_LONG / 4 + 2, rip, o); 314 } 315 316 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 317 if (sym && map) { 318 if (sym->type == STT_OBJECT) { 319 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 320 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 321 ip - map->unmap_ip(map, sym->start)); 322 } else { 323 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 324 width - ret, 325 sym->name); 326 if (sym->inlined) 327 ret += repsep_snprintf(bf + ret, size - ret, 328 " (inlined)"); 329 } 330 } else { 331 size_t len = BITS_PER_LONG / 4; 332 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 333 len, ip); 334 } 335 336 return ret; 337 } 338 339 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) 340 { 341 return _hist_entry__sym_snprintf(&he->ms, he->ip, 342 he->level, bf, size, width); 343 } 344 345 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 346 { 347 const char *sym = arg; 348 349 if (type != HIST_FILTER__SYMBOL) 350 return -1; 351 352 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 353 } 354 355 struct sort_entry sort_sym = { 356 .se_header = "Symbol", 357 .se_cmp = sort__sym_cmp, 358 .se_sort = sort__sym_sort, 359 .se_snprintf = hist_entry__sym_snprintf, 360 .se_filter = hist_entry__sym_filter, 361 .se_width_idx = HISTC_SYMBOL, 362 }; 363 364 /* --sort srcline */ 365 366 char *hist_entry__srcline(struct hist_entry *he) 367 { 368 return map__srcline(he->ms.map, he->ip, he->ms.sym); 369 } 370 371 static int64_t 372 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 373 { 374 if (!left->srcline) 375 left->srcline = hist_entry__srcline(left); 376 if (!right->srcline) 377 right->srcline = hist_entry__srcline(right); 378 379 return strcmp(right->srcline, left->srcline); 380 } 381 382 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 383 size_t size, unsigned int width) 384 { 385 if (!he->srcline) 386 he->srcline = hist_entry__srcline(he); 387 388 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 389 } 390 391 struct sort_entry sort_srcline = { 392 .se_header = "Source:Line", 393 .se_cmp = sort__srcline_cmp, 394 .se_snprintf = hist_entry__srcline_snprintf, 395 .se_width_idx = HISTC_SRCLINE, 396 }; 397 398 /* --sort srcline_from */ 399 400 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams) 401 { 402 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym); 403 } 404 405 static int64_t 406 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 407 { 408 if (!left->branch_info->srcline_from) 409 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); 410 411 if (!right->branch_info->srcline_from) 412 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); 413 414 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 415 } 416 417 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 418 size_t size, unsigned int width) 419 { 420 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 421 } 422 423 struct sort_entry sort_srcline_from = { 424 .se_header = "From Source:Line", 425 .se_cmp = 
sort__srcline_from_cmp, 426 .se_snprintf = hist_entry__srcline_from_snprintf, 427 .se_width_idx = HISTC_SRCLINE_FROM, 428 }; 429 430 /* --sort srcline_to */ 431 432 static int64_t 433 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 434 { 435 if (!left->branch_info->srcline_to) 436 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to); 437 438 if (!right->branch_info->srcline_to) 439 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to); 440 441 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 442 } 443 444 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 445 size_t size, unsigned int width) 446 { 447 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 448 } 449 450 struct sort_entry sort_srcline_to = { 451 .se_header = "To Source:Line", 452 .se_cmp = sort__srcline_to_cmp, 453 .se_snprintf = hist_entry__srcline_to_snprintf, 454 .se_width_idx = HISTC_SRCLINE_TO, 455 }; 456 457 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, 458 size_t size, unsigned int width) 459 { 460 461 struct symbol *sym = he->ms.sym; 462 struct annotation *notes; 463 double ipc = 0.0, coverage = 0.0; 464 char tmp[64]; 465 466 if (!sym) 467 return repsep_snprintf(bf, size, "%-*s", width, "-"); 468 469 notes = symbol__annotation(sym); 470 471 if (notes->hit_cycles) 472 ipc = notes->hit_insn / ((double)notes->hit_cycles); 473 474 if (notes->total_insn) { 475 coverage = notes->cover_insn * 100.0 / 476 ((double)notes->total_insn); 477 } 478 479 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); 480 return repsep_snprintf(bf, size, "%-*s", width, tmp); 481 } 482 483 struct sort_entry sort_sym_ipc = { 484 .se_header = "IPC [IPC Coverage]", 485 .se_cmp = sort__sym_cmp, 486 .se_snprintf = hist_entry__sym_ipc_snprintf, 487 .se_width_idx = HISTC_SYMBOL_IPC, 488 }; 489 490 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he 491 __maybe_unused, 492 char *bf, size_t size, 493 unsigned int width) 494 { 495 char tmp[64]; 496 497 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); 498 return repsep_snprintf(bf, size, "%-*s", width, tmp); 499 } 500 501 struct sort_entry sort_sym_ipc_null = { 502 .se_header = "IPC [IPC Coverage]", 503 .se_cmp = sort__sym_cmp, 504 .se_snprintf = hist_entry__sym_ipc_null_snprintf, 505 .se_width_idx = HISTC_SYMBOL_IPC, 506 }; 507 508 /* --sort srcfile */ 509 510 static char no_srcfile[1]; 511 512 static char *hist_entry__get_srcfile(struct hist_entry *e) 513 { 514 char *sf, *p; 515 struct map *map = e->ms.map; 516 517 if (!map) 518 return no_srcfile; 519 520 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 521 e->ms.sym, false, true, true, e->ip); 522 if (!strcmp(sf, SRCLINE_UNKNOWN)) 523 return no_srcfile; 524 p = strchr(sf, ':'); 525 if (p && *sf) { 526 *p = 0; 527 return sf; 528 } 529 free(sf); 530 return no_srcfile; 531 } 532 533 static int64_t 534 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 535 { 536 if (!left->srcfile) 537 left->srcfile = hist_entry__get_srcfile(left); 538 if (!right->srcfile) 539 right->srcfile = hist_entry__get_srcfile(right); 540 541 return strcmp(right->srcfile, left->srcfile); 542 } 543 544 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 545 size_t size, unsigned int width) 546 { 547 if (!he->srcfile) 548 he->srcfile = hist_entry__get_srcfile(he); 549 550 return repsep_snprintf(bf, size, "%-.*s", width, 
he->srcfile); 551 } 552 553 struct sort_entry sort_srcfile = { 554 .se_header = "Source File", 555 .se_cmp = sort__srcfile_cmp, 556 .se_snprintf = hist_entry__srcfile_snprintf, 557 .se_width_idx = HISTC_SRCFILE, 558 }; 559 560 /* --sort parent */ 561 562 static int64_t 563 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 564 { 565 struct symbol *sym_l = left->parent; 566 struct symbol *sym_r = right->parent; 567 568 if (!sym_l || !sym_r) 569 return cmp_null(sym_l, sym_r); 570 571 return strcmp(sym_r->name, sym_l->name); 572 } 573 574 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 575 size_t size, unsigned int width) 576 { 577 return repsep_snprintf(bf, size, "%-*.*s", width, width, 578 he->parent ? he->parent->name : "[other]"); 579 } 580 581 struct sort_entry sort_parent = { 582 .se_header = "Parent symbol", 583 .se_cmp = sort__parent_cmp, 584 .se_snprintf = hist_entry__parent_snprintf, 585 .se_width_idx = HISTC_PARENT, 586 }; 587 588 /* --sort cpu */ 589 590 static int64_t 591 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 592 { 593 return right->cpu - left->cpu; 594 } 595 596 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 597 size_t size, unsigned int width) 598 { 599 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 600 } 601 602 struct sort_entry sort_cpu = { 603 .se_header = "CPU", 604 .se_cmp = sort__cpu_cmp, 605 .se_snprintf = hist_entry__cpu_snprintf, 606 .se_width_idx = HISTC_CPU, 607 }; 608 609 /* --sort cgroup_id */ 610 611 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 612 { 613 return (int64_t)(right_dev - left_dev); 614 } 615 616 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 617 { 618 return (int64_t)(right_ino - left_ino); 619 } 620 621 static int64_t 622 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 623 { 624 int64_t ret; 625 626 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 627 if (ret != 0) 628 return ret; 629 630 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 631 left->cgroup_id.ino); 632 } 633 634 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 635 char *bf, size_t size, 636 unsigned int width __maybe_unused) 637 { 638 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 639 he->cgroup_id.ino); 640 } 641 642 struct sort_entry sort_cgroup_id = { 643 .se_header = "cgroup id (dev/inode)", 644 .se_cmp = sort__cgroup_id_cmp, 645 .se_snprintf = hist_entry__cgroup_id_snprintf, 646 .se_width_idx = HISTC_CGROUP_ID, 647 }; 648 649 /* --sort cgroup */ 650 651 static int64_t 652 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right) 653 { 654 return right->cgroup - left->cgroup; 655 } 656 657 static int hist_entry__cgroup_snprintf(struct hist_entry *he, 658 char *bf, size_t size, 659 unsigned int width __maybe_unused) 660 { 661 const char *cgrp_name = "N/A"; 662 663 if (he->cgroup) { 664 struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env, 665 he->cgroup); 666 if (cgrp != NULL) 667 cgrp_name = cgrp->name; 668 else 669 cgrp_name = "unknown"; 670 } 671 672 return repsep_snprintf(bf, size, "%s", cgrp_name); 673 } 674 675 struct sort_entry sort_cgroup = { 676 .se_header = "Cgroup", 677 .se_cmp = sort__cgroup_cmp, 678 .se_snprintf = hist_entry__cgroup_snprintf, 679 .se_width_idx = HISTC_CGROUP, 680 }; 681 682 /* --sort socket */ 683 684 static int64_t 685 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 686 { 687 return 
		right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_filter	= hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}

static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header	= "Time",
	.se_cmp		= sort__time_cmp,
	.se_snprintf	= hist_entry__time_snprintf,
	.se_width_idx	= HISTC_TIME,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
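	 * (The realloc() below both trims the allocation to what was actually
	 * written and hands ownership of the buffer to the caller; seq.len is
	 * the length of the formatted string, so one extra byte is kept for
	 * the terminating NUL.)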
768 */ 769 return realloc(seq.buffer, seq.len + 1); 770 } 771 772 static int64_t 773 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 774 { 775 struct evsel *evsel; 776 777 evsel = hists_to_evsel(left->hists); 778 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 779 return 0; 780 781 if (left->trace_output == NULL) 782 left->trace_output = get_trace_output(left); 783 if (right->trace_output == NULL) 784 right->trace_output = get_trace_output(right); 785 786 return strcmp(right->trace_output, left->trace_output); 787 } 788 789 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 790 size_t size, unsigned int width) 791 { 792 struct evsel *evsel; 793 794 evsel = hists_to_evsel(he->hists); 795 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 796 return scnprintf(bf, size, "%-.*s", width, "N/A"); 797 798 if (he->trace_output == NULL) 799 he->trace_output = get_trace_output(he); 800 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 801 } 802 803 struct sort_entry sort_trace = { 804 .se_header = "Trace output", 805 .se_cmp = sort__trace_cmp, 806 .se_snprintf = hist_entry__trace_snprintf, 807 .se_width_idx = HISTC_TRACE, 808 }; 809 810 /* sort keys for branch stacks */ 811 812 static int64_t 813 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 814 { 815 if (!left->branch_info || !right->branch_info) 816 return cmp_null(left->branch_info, right->branch_info); 817 818 return _sort__dso_cmp(left->branch_info->from.ms.map, 819 right->branch_info->from.ms.map); 820 } 821 822 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 823 size_t size, unsigned int width) 824 { 825 if (he->branch_info) 826 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map, 827 bf, size, width); 828 else 829 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 830 } 831 832 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 833 const void *arg) 834 { 835 const struct dso *dso = arg; 836 837 if (type != HIST_FILTER__DSO) 838 return -1; 839 840 return dso && (!he->branch_info || !he->branch_info->from.ms.map || 841 he->branch_info->from.ms.map->dso != dso); 842 } 843 844 static int64_t 845 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 846 { 847 if (!left->branch_info || !right->branch_info) 848 return cmp_null(left->branch_info, right->branch_info); 849 850 return _sort__dso_cmp(left->branch_info->to.ms.map, 851 right->branch_info->to.ms.map); 852 } 853 854 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 855 size_t size, unsigned int width) 856 { 857 if (he->branch_info) 858 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map, 859 bf, size, width); 860 else 861 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 862 } 863 864 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 865 const void *arg) 866 { 867 const struct dso *dso = arg; 868 869 if (type != HIST_FILTER__DSO) 870 return -1; 871 872 return dso && (!he->branch_info || !he->branch_info->to.ms.map || 873 he->branch_info->to.ms.map->dso != dso); 874 } 875 876 static int64_t 877 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 878 { 879 struct addr_map_symbol *from_l = &left->branch_info->from; 880 struct addr_map_symbol *from_r = &right->branch_info->from; 881 882 if (!left->branch_info || !right->branch_info) 883 return cmp_null(left->branch_info, right->branch_info); 884 885 from_l = &left->branch_info->from; 886 from_r = 
&right->branch_info->from; 887 888 if (!from_l->ms.sym && !from_r->ms.sym) 889 return _sort__addr_cmp(from_l->addr, from_r->addr); 890 891 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym); 892 } 893 894 static int64_t 895 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 896 { 897 struct addr_map_symbol *to_l, *to_r; 898 899 if (!left->branch_info || !right->branch_info) 900 return cmp_null(left->branch_info, right->branch_info); 901 902 to_l = &left->branch_info->to; 903 to_r = &right->branch_info->to; 904 905 if (!to_l->ms.sym && !to_r->ms.sym) 906 return _sort__addr_cmp(to_l->addr, to_r->addr); 907 908 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym); 909 } 910 911 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 912 size_t size, unsigned int width) 913 { 914 if (he->branch_info) { 915 struct addr_map_symbol *from = &he->branch_info->from; 916 917 return _hist_entry__sym_snprintf(&from->ms, from->al_addr, 918 from->al_level, bf, size, width); 919 } 920 921 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 922 } 923 924 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 925 size_t size, unsigned int width) 926 { 927 if (he->branch_info) { 928 struct addr_map_symbol *to = &he->branch_info->to; 929 930 return _hist_entry__sym_snprintf(&to->ms, to->al_addr, 931 to->al_level, bf, size, width); 932 } 933 934 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 935 } 936 937 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 938 const void *arg) 939 { 940 const char *sym = arg; 941 942 if (type != HIST_FILTER__SYMBOL) 943 return -1; 944 945 return sym && !(he->branch_info && he->branch_info->from.ms.sym && 946 strstr(he->branch_info->from.ms.sym->name, sym)); 947 } 948 949 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 950 const void *arg) 951 { 952 const char *sym = arg; 953 954 if (type != HIST_FILTER__SYMBOL) 955 return -1; 956 957 return sym && !(he->branch_info && he->branch_info->to.ms.sym && 958 strstr(he->branch_info->to.ms.sym->name, sym)); 959 } 960 961 struct sort_entry sort_dso_from = { 962 .se_header = "Source Shared Object", 963 .se_cmp = sort__dso_from_cmp, 964 .se_snprintf = hist_entry__dso_from_snprintf, 965 .se_filter = hist_entry__dso_from_filter, 966 .se_width_idx = HISTC_DSO_FROM, 967 }; 968 969 struct sort_entry sort_dso_to = { 970 .se_header = "Target Shared Object", 971 .se_cmp = sort__dso_to_cmp, 972 .se_snprintf = hist_entry__dso_to_snprintf, 973 .se_filter = hist_entry__dso_to_filter, 974 .se_width_idx = HISTC_DSO_TO, 975 }; 976 977 struct sort_entry sort_sym_from = { 978 .se_header = "Source Symbol", 979 .se_cmp = sort__sym_from_cmp, 980 .se_snprintf = hist_entry__sym_from_snprintf, 981 .se_filter = hist_entry__sym_from_filter, 982 .se_width_idx = HISTC_SYMBOL_FROM, 983 }; 984 985 struct sort_entry sort_sym_to = { 986 .se_header = "Target Symbol", 987 .se_cmp = sort__sym_to_cmp, 988 .se_snprintf = hist_entry__sym_to_snprintf, 989 .se_filter = hist_entry__sym_to_filter, 990 .se_width_idx = HISTC_SYMBOL_TO, 991 }; 992 993 static int _hist_entry__addr_snprintf(struct map_symbol *ms, 994 u64 ip, char level, char *bf, size_t size, 995 unsigned int width) 996 { 997 struct symbol *sym = ms->sym; 998 struct map *map = ms->map; 999 size_t ret = 0, offs; 1000 1001 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 1002 if (sym && map) { 1003 if (sym->type == STT_OBJECT) { 1004 ret += repsep_snprintf(bf + ret, size - ret, "%s", 
					       sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			offs = ip - sym->start;
			if (offs)
				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l;
	struct addr_map_symbol *from_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(from_l->addr, from_r->addr);
}

static int64_t
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l;
	struct addr_map_symbol *to_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
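	 * Entries in different DSOs can share the same relative address, so
	 * compare the DSOs of the two maps first and only fall back to the
	 * raw address when they match.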
1090 */ 1091 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map); 1092 if (ret != 0) 1093 return ret; 1094 1095 return _sort__addr_cmp(to_l->addr, to_r->addr); 1096 } 1097 1098 struct sort_entry sort_addr_from = { 1099 .se_header = "Source Address", 1100 .se_cmp = sort__addr_from_cmp, 1101 .se_snprintf = hist_entry__addr_from_snprintf, 1102 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */ 1103 .se_width_idx = HISTC_ADDR_FROM, 1104 }; 1105 1106 struct sort_entry sort_addr_to = { 1107 .se_header = "Target Address", 1108 .se_cmp = sort__addr_to_cmp, 1109 .se_snprintf = hist_entry__addr_to_snprintf, 1110 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */ 1111 .se_width_idx = HISTC_ADDR_TO, 1112 }; 1113 1114 1115 static int64_t 1116 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 1117 { 1118 unsigned char mp, p; 1119 1120 if (!left->branch_info || !right->branch_info) 1121 return cmp_null(left->branch_info, right->branch_info); 1122 1123 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 1124 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 1125 return mp || p; 1126 } 1127 1128 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 1129 size_t size, unsigned int width){ 1130 static const char *out = "N/A"; 1131 1132 if (he->branch_info) { 1133 if (he->branch_info->flags.predicted) 1134 out = "N"; 1135 else if (he->branch_info->flags.mispred) 1136 out = "Y"; 1137 } 1138 1139 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 1140 } 1141 1142 static int64_t 1143 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 1144 { 1145 if (!left->branch_info || !right->branch_info) 1146 return cmp_null(left->branch_info, right->branch_info); 1147 1148 return left->branch_info->flags.cycles - 1149 right->branch_info->flags.cycles; 1150 } 1151 1152 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 1153 size_t size, unsigned int width) 1154 { 1155 if (!he->branch_info) 1156 return scnprintf(bf, size, "%-.*s", width, "N/A"); 1157 if (he->branch_info->flags.cycles == 0) 1158 return repsep_snprintf(bf, size, "%-*s", width, "-"); 1159 return repsep_snprintf(bf, size, "%-*hd", width, 1160 he->branch_info->flags.cycles); 1161 } 1162 1163 struct sort_entry sort_cycles = { 1164 .se_header = "Basic Block Cycles", 1165 .se_cmp = sort__cycles_cmp, 1166 .se_snprintf = hist_entry__cycles_snprintf, 1167 .se_width_idx = HISTC_CYCLES, 1168 }; 1169 1170 /* --sort daddr_sym */ 1171 int64_t 1172 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1173 { 1174 uint64_t l = 0, r = 0; 1175 1176 if (left->mem_info) 1177 l = left->mem_info->daddr.addr; 1178 if (right->mem_info) 1179 r = right->mem_info->daddr.addr; 1180 1181 return (int64_t)(r - l); 1182 } 1183 1184 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 1185 size_t size, unsigned int width) 1186 { 1187 uint64_t addr = 0; 1188 struct map_symbol *ms = NULL; 1189 1190 if (he->mem_info) { 1191 addr = he->mem_info->daddr.addr; 1192 ms = &he->mem_info->daddr.ms; 1193 } 1194 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1195 } 1196 1197 int64_t 1198 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 1199 { 1200 uint64_t l = 0, r = 0; 1201 1202 if (left->mem_info) 1203 l = left->mem_info->iaddr.addr; 1204 if (right->mem_info) 1205 r = right->mem_info->iaddr.addr; 1206 1207 return (int64_t)(r - l); 1208 } 1209 1210 static int 
hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 1211 size_t size, unsigned int width) 1212 { 1213 uint64_t addr = 0; 1214 struct map_symbol *ms = NULL; 1215 1216 if (he->mem_info) { 1217 addr = he->mem_info->iaddr.addr; 1218 ms = &he->mem_info->iaddr.ms; 1219 } 1220 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1221 } 1222 1223 static int64_t 1224 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1225 { 1226 struct map *map_l = NULL; 1227 struct map *map_r = NULL; 1228 1229 if (left->mem_info) 1230 map_l = left->mem_info->daddr.ms.map; 1231 if (right->mem_info) 1232 map_r = right->mem_info->daddr.ms.map; 1233 1234 return _sort__dso_cmp(map_l, map_r); 1235 } 1236 1237 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1238 size_t size, unsigned int width) 1239 { 1240 struct map *map = NULL; 1241 1242 if (he->mem_info) 1243 map = he->mem_info->daddr.ms.map; 1244 1245 return _hist_entry__dso_snprintf(map, bf, size, width); 1246 } 1247 1248 static int64_t 1249 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1250 { 1251 union perf_mem_data_src data_src_l; 1252 union perf_mem_data_src data_src_r; 1253 1254 if (left->mem_info) 1255 data_src_l = left->mem_info->data_src; 1256 else 1257 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1258 1259 if (right->mem_info) 1260 data_src_r = right->mem_info->data_src; 1261 else 1262 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1263 1264 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1265 } 1266 1267 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1268 size_t size, unsigned int width) 1269 { 1270 char out[10]; 1271 1272 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1273 return repsep_snprintf(bf, size, "%.*s", width, out); 1274 } 1275 1276 static int64_t 1277 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1278 { 1279 union perf_mem_data_src data_src_l; 1280 union perf_mem_data_src data_src_r; 1281 1282 if (left->mem_info) 1283 data_src_l = left->mem_info->data_src; 1284 else 1285 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1286 1287 if (right->mem_info) 1288 data_src_r = right->mem_info->data_src; 1289 else 1290 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1291 1292 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1293 } 1294 1295 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1296 size_t size, unsigned int width) 1297 { 1298 char out[64]; 1299 1300 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1301 return repsep_snprintf(bf, size, "%-*s", width, out); 1302 } 1303 1304 static int64_t 1305 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1306 { 1307 union perf_mem_data_src data_src_l; 1308 union perf_mem_data_src data_src_r; 1309 1310 if (left->mem_info) 1311 data_src_l = left->mem_info->data_src; 1312 else 1313 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1314 1315 if (right->mem_info) 1316 data_src_r = right->mem_info->data_src; 1317 else 1318 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1319 1320 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1321 } 1322 1323 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1324 size_t size, unsigned int width) 1325 { 1326 char out[64]; 1327 1328 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1329 return repsep_snprintf(bf, size, "%-*s", width, out); 1330 } 1331 1332 static int64_t 1333 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1334 { 1335 union perf_mem_data_src 
data_src_l; 1336 union perf_mem_data_src data_src_r; 1337 1338 if (left->mem_info) 1339 data_src_l = left->mem_info->data_src; 1340 else 1341 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1342 1343 if (right->mem_info) 1344 data_src_r = right->mem_info->data_src; 1345 else 1346 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1347 1348 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1349 } 1350 1351 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1352 size_t size, unsigned int width) 1353 { 1354 char out[64]; 1355 1356 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1357 return repsep_snprintf(bf, size, "%-*s", width, out); 1358 } 1359 1360 int64_t 1361 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1362 { 1363 u64 l, r; 1364 struct map *l_map, *r_map; 1365 int rc; 1366 1367 if (!left->mem_info) return -1; 1368 if (!right->mem_info) return 1; 1369 1370 /* group event types together */ 1371 if (left->cpumode > right->cpumode) return -1; 1372 if (left->cpumode < right->cpumode) return 1; 1373 1374 l_map = left->mem_info->daddr.ms.map; 1375 r_map = right->mem_info->daddr.ms.map; 1376 1377 /* if both are NULL, jump to sort on al_addr instead */ 1378 if (!l_map && !r_map) 1379 goto addr; 1380 1381 if (!l_map) return -1; 1382 if (!r_map) return 1; 1383 1384 rc = dso__cmp_id(l_map->dso, r_map->dso); 1385 if (rc) 1386 return rc; 1387 /* 1388 * Addresses with no major/minor numbers are assumed to be 1389 * anonymous in userspace. Sort those on pid then address. 1390 * 1391 * The kernel and non-zero major/minor mapped areas are 1392 * assumed to be unity mapped. Sort those on address. 1393 */ 1394 1395 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1396 (!(l_map->flags & MAP_SHARED)) && 1397 !l_map->dso->id.maj && !l_map->dso->id.min && 1398 !l_map->dso->id.ino && !l_map->dso->id.ino_generation) { 1399 /* userspace anonymous */ 1400 1401 if (left->thread->pid_ > right->thread->pid_) return -1; 1402 if (left->thread->pid_ < right->thread->pid_) return 1; 1403 } 1404 1405 addr: 1406 /* al_addr does all the right addr - start + offset calculations */ 1407 l = cl_address(left->mem_info->daddr.al_addr); 1408 r = cl_address(right->mem_info->daddr.al_addr); 1409 1410 if (l > r) return -1; 1411 if (l < r) return 1; 1412 1413 return 0; 1414 } 1415 1416 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1417 size_t size, unsigned int width) 1418 { 1419 1420 uint64_t addr = 0; 1421 struct map_symbol *ms = NULL; 1422 char level = he->level; 1423 1424 if (he->mem_info) { 1425 struct map *map = he->mem_info->daddr.ms.map; 1426 1427 addr = cl_address(he->mem_info->daddr.al_addr); 1428 ms = &he->mem_info->daddr.ms; 1429 1430 /* print [s] for shared data mmaps */ 1431 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1432 map && !(map->prot & PROT_EXEC) && 1433 (map->flags & MAP_SHARED) && 1434 (map->dso->id.maj || map->dso->id.min || 1435 map->dso->id.ino || map->dso->id.ino_generation)) 1436 level = 's'; 1437 else if (!map) 1438 level = 'X'; 1439 } 1440 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width); 1441 } 1442 1443 struct sort_entry sort_mispredict = { 1444 .se_header = "Branch Mispredicted", 1445 .se_cmp = sort__mispredict_cmp, 1446 .se_snprintf = hist_entry__mispredict_snprintf, 1447 .se_width_idx = HISTC_MISPREDICT, 1448 }; 1449 1450 static int64_t 1451 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right) 1452 { 1453 return left->weight - right->weight; 1454 } 1455 1456 static int 
hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1457 size_t size, unsigned int width) 1458 { 1459 return repsep_snprintf(bf, size, "%-*llu", width, he->weight); 1460 } 1461 1462 struct sort_entry sort_local_weight = { 1463 .se_header = "Local Weight", 1464 .se_cmp = sort__weight_cmp, 1465 .se_snprintf = hist_entry__local_weight_snprintf, 1466 .se_width_idx = HISTC_LOCAL_WEIGHT, 1467 }; 1468 1469 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1470 size_t size, unsigned int width) 1471 { 1472 return repsep_snprintf(bf, size, "%-*llu", width, 1473 he->weight * he->stat.nr_events); 1474 } 1475 1476 struct sort_entry sort_global_weight = { 1477 .se_header = "Weight", 1478 .se_cmp = sort__weight_cmp, 1479 .se_snprintf = hist_entry__global_weight_snprintf, 1480 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1481 }; 1482 1483 static int64_t 1484 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) 1485 { 1486 return left->ins_lat - right->ins_lat; 1487 } 1488 1489 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf, 1490 size_t size, unsigned int width) 1491 { 1492 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat); 1493 } 1494 1495 struct sort_entry sort_local_ins_lat = { 1496 .se_header = "Local INSTR Latency", 1497 .se_cmp = sort__ins_lat_cmp, 1498 .se_snprintf = hist_entry__local_ins_lat_snprintf, 1499 .se_width_idx = HISTC_LOCAL_INS_LAT, 1500 }; 1501 1502 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf, 1503 size_t size, unsigned int width) 1504 { 1505 return repsep_snprintf(bf, size, "%-*u", width, 1506 he->ins_lat * he->stat.nr_events); 1507 } 1508 1509 struct sort_entry sort_global_ins_lat = { 1510 .se_header = "INSTR Latency", 1511 .se_cmp = sort__ins_lat_cmp, 1512 .se_snprintf = hist_entry__global_ins_lat_snprintf, 1513 .se_width_idx = HISTC_GLOBAL_INS_LAT, 1514 }; 1515 1516 static int64_t 1517 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right) 1518 { 1519 return left->p_stage_cyc - right->p_stage_cyc; 1520 } 1521 1522 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf, 1523 size_t size, unsigned int width) 1524 { 1525 return repsep_snprintf(bf, size, "%-*u", width, 1526 he->p_stage_cyc * he->stat.nr_events); 1527 } 1528 1529 1530 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf, 1531 size_t size, unsigned int width) 1532 { 1533 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc); 1534 } 1535 1536 struct sort_entry sort_local_p_stage_cyc = { 1537 .se_header = "Local Pipeline Stage Cycle", 1538 .se_cmp = sort__p_stage_cyc_cmp, 1539 .se_snprintf = hist_entry__p_stage_cyc_snprintf, 1540 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC, 1541 }; 1542 1543 struct sort_entry sort_global_p_stage_cyc = { 1544 .se_header = "Pipeline Stage Cycle", 1545 .se_cmp = sort__p_stage_cyc_cmp, 1546 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf, 1547 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC, 1548 }; 1549 1550 struct sort_entry sort_mem_daddr_sym = { 1551 .se_header = "Data Symbol", 1552 .se_cmp = sort__daddr_cmp, 1553 .se_snprintf = hist_entry__daddr_snprintf, 1554 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1555 }; 1556 1557 struct sort_entry sort_mem_iaddr_sym = { 1558 .se_header = "Code Symbol", 1559 .se_cmp = sort__iaddr_cmp, 1560 .se_snprintf = hist_entry__iaddr_snprintf, 1561 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1562 }; 1563 1564 struct sort_entry sort_mem_daddr_dso = { 1565 .se_header = 
"Data Object", 1566 .se_cmp = sort__dso_daddr_cmp, 1567 .se_snprintf = hist_entry__dso_daddr_snprintf, 1568 .se_width_idx = HISTC_MEM_DADDR_DSO, 1569 }; 1570 1571 struct sort_entry sort_mem_locked = { 1572 .se_header = "Locked", 1573 .se_cmp = sort__locked_cmp, 1574 .se_snprintf = hist_entry__locked_snprintf, 1575 .se_width_idx = HISTC_MEM_LOCKED, 1576 }; 1577 1578 struct sort_entry sort_mem_tlb = { 1579 .se_header = "TLB access", 1580 .se_cmp = sort__tlb_cmp, 1581 .se_snprintf = hist_entry__tlb_snprintf, 1582 .se_width_idx = HISTC_MEM_TLB, 1583 }; 1584 1585 struct sort_entry sort_mem_lvl = { 1586 .se_header = "Memory access", 1587 .se_cmp = sort__lvl_cmp, 1588 .se_snprintf = hist_entry__lvl_snprintf, 1589 .se_width_idx = HISTC_MEM_LVL, 1590 }; 1591 1592 struct sort_entry sort_mem_snoop = { 1593 .se_header = "Snoop", 1594 .se_cmp = sort__snoop_cmp, 1595 .se_snprintf = hist_entry__snoop_snprintf, 1596 .se_width_idx = HISTC_MEM_SNOOP, 1597 }; 1598 1599 struct sort_entry sort_mem_dcacheline = { 1600 .se_header = "Data Cacheline", 1601 .se_cmp = sort__dcacheline_cmp, 1602 .se_snprintf = hist_entry__dcacheline_snprintf, 1603 .se_width_idx = HISTC_MEM_DCACHELINE, 1604 }; 1605 1606 static int64_t 1607 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right) 1608 { 1609 union perf_mem_data_src data_src_l; 1610 union perf_mem_data_src data_src_r; 1611 1612 if (left->mem_info) 1613 data_src_l = left->mem_info->data_src; 1614 else 1615 data_src_l.mem_blk = PERF_MEM_BLK_NA; 1616 1617 if (right->mem_info) 1618 data_src_r = right->mem_info->data_src; 1619 else 1620 data_src_r.mem_blk = PERF_MEM_BLK_NA; 1621 1622 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk); 1623 } 1624 1625 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf, 1626 size_t size, unsigned int width) 1627 { 1628 char out[16]; 1629 1630 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info); 1631 return repsep_snprintf(bf, size, "%.*s", width, out); 1632 } 1633 1634 struct sort_entry sort_mem_blocked = { 1635 .se_header = "Blocked", 1636 .se_cmp = sort__blocked_cmp, 1637 .se_snprintf = hist_entry__blocked_snprintf, 1638 .se_width_idx = HISTC_MEM_BLOCKED, 1639 }; 1640 1641 static int64_t 1642 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1643 { 1644 uint64_t l = 0, r = 0; 1645 1646 if (left->mem_info) 1647 l = left->mem_info->daddr.phys_addr; 1648 if (right->mem_info) 1649 r = right->mem_info->daddr.phys_addr; 1650 1651 return (int64_t)(r - l); 1652 } 1653 1654 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1655 size_t size, unsigned int width) 1656 { 1657 uint64_t addr = 0; 1658 size_t ret = 0; 1659 size_t len = BITS_PER_LONG / 4; 1660 1661 addr = he->mem_info->daddr.phys_addr; 1662 1663 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1664 1665 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1666 1667 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1668 1669 if (ret > width) 1670 bf[width] = '\0'; 1671 1672 return width; 1673 } 1674 1675 struct sort_entry sort_mem_phys_daddr = { 1676 .se_header = "Data Physical Address", 1677 .se_cmp = sort__phys_daddr_cmp, 1678 .se_snprintf = hist_entry__phys_daddr_snprintf, 1679 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1680 }; 1681 1682 static int64_t 1683 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 1684 { 1685 uint64_t l = 0, r = 0; 1686 1687 if (left->mem_info) 1688 l = left->mem_info->daddr.data_page_size; 1689 if 
(right->mem_info) 1690 r = right->mem_info->daddr.data_page_size; 1691 1692 return (int64_t)(r - l); 1693 } 1694 1695 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf, 1696 size_t size, unsigned int width) 1697 { 1698 char str[PAGE_SIZE_NAME_LEN]; 1699 1700 return repsep_snprintf(bf, size, "%-*s", width, 1701 get_page_size_name(he->mem_info->daddr.data_page_size, str)); 1702 } 1703 1704 struct sort_entry sort_mem_data_page_size = { 1705 .se_header = "Data Page Size", 1706 .se_cmp = sort__data_page_size_cmp, 1707 .se_snprintf = hist_entry__data_page_size_snprintf, 1708 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE, 1709 }; 1710 1711 static int64_t 1712 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 1713 { 1714 uint64_t l = left->code_page_size; 1715 uint64_t r = right->code_page_size; 1716 1717 return (int64_t)(r - l); 1718 } 1719 1720 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf, 1721 size_t size, unsigned int width) 1722 { 1723 char str[PAGE_SIZE_NAME_LEN]; 1724 1725 return repsep_snprintf(bf, size, "%-*s", width, 1726 get_page_size_name(he->code_page_size, str)); 1727 } 1728 1729 struct sort_entry sort_code_page_size = { 1730 .se_header = "Code Page Size", 1731 .se_cmp = sort__code_page_size_cmp, 1732 .se_snprintf = hist_entry__code_page_size_snprintf, 1733 .se_width_idx = HISTC_CODE_PAGE_SIZE, 1734 }; 1735 1736 static int64_t 1737 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1738 { 1739 if (!left->branch_info || !right->branch_info) 1740 return cmp_null(left->branch_info, right->branch_info); 1741 1742 return left->branch_info->flags.abort != 1743 right->branch_info->flags.abort; 1744 } 1745 1746 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1747 size_t size, unsigned int width) 1748 { 1749 static const char *out = "N/A"; 1750 1751 if (he->branch_info) { 1752 if (he->branch_info->flags.abort) 1753 out = "A"; 1754 else 1755 out = "."; 1756 } 1757 1758 return repsep_snprintf(bf, size, "%-*s", width, out); 1759 } 1760 1761 struct sort_entry sort_abort = { 1762 .se_header = "Transaction abort", 1763 .se_cmp = sort__abort_cmp, 1764 .se_snprintf = hist_entry__abort_snprintf, 1765 .se_width_idx = HISTC_ABORT, 1766 }; 1767 1768 static int64_t 1769 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1770 { 1771 if (!left->branch_info || !right->branch_info) 1772 return cmp_null(left->branch_info, right->branch_info); 1773 1774 return left->branch_info->flags.in_tx != 1775 right->branch_info->flags.in_tx; 1776 } 1777 1778 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1779 size_t size, unsigned int width) 1780 { 1781 static const char *out = "N/A"; 1782 1783 if (he->branch_info) { 1784 if (he->branch_info->flags.in_tx) 1785 out = "T"; 1786 else 1787 out = "."; 1788 } 1789 1790 return repsep_snprintf(bf, size, "%-*s", width, out); 1791 } 1792 1793 struct sort_entry sort_in_tx = { 1794 .se_header = "Branch in transaction", 1795 .se_cmp = sort__in_tx_cmp, 1796 .se_snprintf = hist_entry__in_tx_snprintf, 1797 .se_width_idx = HISTC_IN_TX, 1798 }; 1799 1800 static int64_t 1801 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1802 { 1803 return left->transaction - right->transaction; 1804 } 1805 1806 static inline char *add_str(char *p, const char *str) 1807 { 1808 strcpy(p, str); 1809 return p + strlen(str); 1810 } 1811 1812 static struct txbit { 1813 unsigned flag; 1814 const char *name; 1815 int 
skip_for_len; 1816 } txbits[] = { 1817 { PERF_TXN_ELISION, "EL ", 0 }, 1818 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1819 { PERF_TXN_SYNC, "SYNC ", 1 }, 1820 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1821 { PERF_TXN_RETRY, "RETRY ", 0 }, 1822 { PERF_TXN_CONFLICT, "CON ", 0 }, 1823 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1824 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1825 { 0, NULL, 0 } 1826 }; 1827 1828 int hist_entry__transaction_len(void) 1829 { 1830 int i; 1831 int len = 0; 1832 1833 for (i = 0; txbits[i].name; i++) { 1834 if (!txbits[i].skip_for_len) 1835 len += strlen(txbits[i].name); 1836 } 1837 len += 4; /* :XX<space> */ 1838 return len; 1839 } 1840 1841 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1842 size_t size, unsigned int width) 1843 { 1844 u64 t = he->transaction; 1845 char buf[128]; 1846 char *p = buf; 1847 int i; 1848 1849 buf[0] = 0; 1850 for (i = 0; txbits[i].name; i++) 1851 if (txbits[i].flag & t) 1852 p = add_str(p, txbits[i].name); 1853 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1854 p = add_str(p, "NEITHER "); 1855 if (t & PERF_TXN_ABORT_MASK) { 1856 sprintf(p, ":%" PRIx64, 1857 (t & PERF_TXN_ABORT_MASK) >> 1858 PERF_TXN_ABORT_SHIFT); 1859 p += strlen(p); 1860 } 1861 1862 return repsep_snprintf(bf, size, "%-*s", width, buf); 1863 } 1864 1865 struct sort_entry sort_transaction = { 1866 .se_header = "Transaction ", 1867 .se_cmp = sort__transaction_cmp, 1868 .se_snprintf = hist_entry__transaction_snprintf, 1869 .se_width_idx = HISTC_TRANSACTION, 1870 }; 1871 1872 /* --sort symbol_size */ 1873 1874 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 1875 { 1876 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 1877 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 1878 1879 return size_l < size_r ? -1 : 1880 size_l == size_r ? 0 : 1; 1881 } 1882 1883 static int64_t 1884 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 1885 { 1886 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 1887 } 1888 1889 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 1890 size_t bf_size, unsigned int width) 1891 { 1892 if (sym) 1893 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 1894 1895 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1896 } 1897 1898 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 1899 size_t size, unsigned int width) 1900 { 1901 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 1902 } 1903 1904 struct sort_entry sort_sym_size = { 1905 .se_header = "Symbol size", 1906 .se_cmp = sort__sym_size_cmp, 1907 .se_snprintf = hist_entry__sym_size_snprintf, 1908 .se_width_idx = HISTC_SYM_SIZE, 1909 }; 1910 1911 /* --sort dso_size */ 1912 1913 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 1914 { 1915 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 1916 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 1917 1918 return size_l < size_r ? -1 : 1919 size_l == size_r ? 
			       0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header	= "DSO size",
	.se_cmp		= sort__dso_size_cmp,
	.se_snprintf	= hist_entry__dso_size_snprintf,
	.se_width_idx	= HISTC_DSO_SIZE,
};

/* --sort addr */

static int64_t
sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 left_ip = left->ip;
	u64 right_ip = right->ip;
	struct map *left_map = left->ms.map;
	struct map *right_map = right->ms.map;

	if (left_map)
		left_ip = left_map->unmap_ip(left_map, left_ip);
	if (right_map)
		right_ip = right_map->unmap_ip(right_map, right_ip);

	return _sort__addr_cmp(left_ip, right_ip);
}

static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	u64 ip = he->ip;
	struct map *map = he->ms.map;

	if (map)
		ip = map->unmap_ip(map, ip);

	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
}

struct sort_entry sort_addr = {
	.se_header	= "Address",
	.se_cmp		= sort__addr_cmp,
	.se_snprintf	= hist_entry__addr_snprintf,
	.se_width_idx	= HISTC_ADDR,
};

struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
{
	return 0;
}

const char * __weak arch_perf_header_entry(const char *se_header)
{
	return se_header;
}

static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
{
	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
}

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
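	/*
	 * The headers of the four *_ins_lat / *_p_stage_cyc entries below may
	 * be rewritten per architecture: see dynamic_headers[],
	 * arch_perf_header_entry() and sort_dimension_add_dynamic_header().
	 */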
	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
	DIM(SORT_ADDR, "addr", sort_addr),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
};

#undef DIM

struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM
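
/*
 * Note (descriptive, added for clarity): each sort key the user selects is
 * wrapped in the struct below, which embeds a perf_hpp_fmt.  The same object
 * can therefore be linked on both the output-column list and the sort-key
 * list of a perf_hpp_list (see the list/sort_list nodes initialized in
 * __sort_dimension__alloc_hpp()), while se points back at the static
 * sort_entry tables above.
 */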

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
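	/*
	 * Wire the generic perf_hpp_fmt callbacks to the thin __sort__hpp_*
	 * wrappers above, which forward to the wrapped sort_entry's se_*
	 * methods (falling back to se_cmp when se_collapse/se_sort are unset).
	 */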
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * The hist entry is filtered if any sort key in the hpp list
		 * applies a filter to it.  Non-matching filter types are
		 * skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length for printing hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);
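	/*
	 * The pretty-printed trace output is assumed to be a space-separated
	 * list of "name=value" pairs (e.g. "prev_comm=swapper prev_pid=0");
	 * scan it for this entry's field and remember the widest value seen.
	 */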
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
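		/*
		 * Dynamic fields store a packed descriptor in the trace data:
		 * the low 16 bits hold the payload offset, the high 16 bits
		 * hold its length (extracted just below).
		 */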
		size = (dyn >> 16) & 0xffff;
		if (field->flags & TEP_FIELD_IS_RELATIVE)
			offset += field->offset + field->size;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}
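
/*
 * A dynamic sort key has the shape "[<event>.]<field>[/<option>]", e.g.
 * "sched:sched_switch.prev_pid" or just "prev_pid" to search all tracepoint
 * events.  The exact accepted spellings follow parse_field_name() above and
 * add_dynamic_entry() below; the examples here are only illustrative.
 */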
/*
 * Find the matching evsel using a given event name.  The event name can be:
 * 1. '%' + event index (e.g. '%1' for first event)
 * 2. full event name (e.g. sched:sched_switch)
 * 3. partial event name (should not contain ':')
 */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
	struct evsel *evsel = NULL;
	struct evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->core.nr_entries)
			return NULL;

		evsel = evlist__first(evlist);
		while (--nr > 0)
			evsel = evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct evsel *evsel,
				    struct tep_format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct tep_format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct evsel *evsel;
	struct tep_format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

static int add_dynamic_entry(struct evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct evsel *evsel;
	struct tep_format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
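		/* a "/raw" suffix disables pretty-printing for this field only */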
true; 2821 } 2822 2823 if (!strcmp(field_name, "trace_fields")) { 2824 ret = add_all_dynamic_fields(evlist, raw_trace, level); 2825 goto out; 2826 } 2827 2828 if (event_name == NULL) { 2829 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 2830 goto out; 2831 } 2832 2833 evsel = find_evsel(evlist, event_name); 2834 if (evsel == NULL) { 2835 pr_debug("Cannot find event: %s\n", event_name); 2836 ret = -ENOENT; 2837 goto out; 2838 } 2839 2840 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2841 pr_debug("%s is not a tracepoint event\n", event_name); 2842 ret = -EINVAL; 2843 goto out; 2844 } 2845 2846 if (!strcmp(field_name, "*")) { 2847 ret = add_evsel_fields(evsel, raw_trace, level); 2848 } else { 2849 field = tep_find_any_field(evsel->tp_format, field_name); 2850 if (field == NULL) { 2851 pr_debug("Cannot find event field for %s.%s\n", 2852 event_name, field_name); 2853 return -ENOENT; 2854 } 2855 2856 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2857 } 2858 2859 out: 2860 free(str); 2861 return ret; 2862 } 2863 2864 static int __sort_dimension__add(struct sort_dimension *sd, 2865 struct perf_hpp_list *list, 2866 int level) 2867 { 2868 if (sd->taken) 2869 return 0; 2870 2871 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 2872 return -1; 2873 2874 if (sd->entry->se_collapse) 2875 list->need_collapse = 1; 2876 2877 sd->taken = 1; 2878 2879 return 0; 2880 } 2881 2882 static int __hpp_dimension__add(struct hpp_dimension *hd, 2883 struct perf_hpp_list *list, 2884 int level) 2885 { 2886 struct perf_hpp_fmt *fmt; 2887 2888 if (hd->taken) 2889 return 0; 2890 2891 fmt = __hpp_dimension__alloc_hpp(hd, level); 2892 if (!fmt) 2893 return -1; 2894 2895 hd->taken = 1; 2896 perf_hpp_list__register_sort_field(list, fmt); 2897 return 0; 2898 } 2899 2900 static int __sort_dimension__add_output(struct perf_hpp_list *list, 2901 struct sort_dimension *sd) 2902 { 2903 if (sd->taken) 2904 return 0; 2905 2906 if (__sort_dimension__add_hpp_output(sd, list) < 0) 2907 return -1; 2908 2909 sd->taken = 1; 2910 return 0; 2911 } 2912 2913 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 2914 struct hpp_dimension *hd) 2915 { 2916 struct perf_hpp_fmt *fmt; 2917 2918 if (hd->taken) 2919 return 0; 2920 2921 fmt = __hpp_dimension__alloc_hpp(hd, 0); 2922 if (!fmt) 2923 return -1; 2924 2925 hd->taken = 1; 2926 perf_hpp_list__column_register(list, fmt); 2927 return 0; 2928 } 2929 2930 int hpp_dimension__add_output(unsigned col) 2931 { 2932 BUG_ON(col >= PERF_HPP__MAX_INDEX); 2933 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 2934 } 2935 2936 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 2937 struct evlist *evlist, 2938 int level) 2939 { 2940 unsigned int i, j; 2941 2942 /* 2943 * Check to see if there are any arch specific 2944 * sort dimensions not applicable for the current 2945 * architecture. If so, Skip that sort key since 2946 * we don't want to display it in the output fields. 
	 */
	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
		if (!strcmp(arch_specific_sort_keys[j], tok) &&
		    !arch_support_sort_key(tok)) {
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
			if (!strcmp(dynamic_headers[j], sd->name))
				sort_dimension_add_dynamic_header(sd);
		}

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference among
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their IPs, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}
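
/*
 * The sort specification is a list of keys separated by ',' or ' '.  Keys
 * wrapped in '{}' stay at the same hierarchy level: e.g. (illustrative)
 * "perf report --hierarchy -s '{comm,dso},sym'" would group comm and dso into
 * one level.  Note below how the level only advances outside a group.
 */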
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() &&
				    !strncasecmp(tok, "dcacheline", strlen(tok)))
					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					ui__error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				ui__error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

static const char *get_default_sort_order(struct evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		ui__error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but never free it,
	 * because it is referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		n = NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
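
/*
 * For example (assuming the default sort order), a plain "perf report" ends
 * up sorting on "overhead,comm,dso,symbol", and with --children the
 * "overhead_children" key is prepended as well, courtesy of setup_overhead().
 */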
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, honor it and don't add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	case HISTC_ADDR_FROM:
		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
	case HISTC_ADDR_TO:
		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
	default:
		break;
	}

	return false;
}

void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}
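
/*
 * A leading '+' in --sort/--fields means "append to the defaults" rather
 * than replace them, which is why setup_sort_order() above prepends the
 * default keys and __setup_output_field() below skips over the '+'.
 * E.g. (illustrative) "-s +pid" keeps the default keys and adds "pid".
 */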
static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		ui__error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}