// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
#include "annotate.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include <linux/kernel.h>
#include <linux/string.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
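/*
 * Reading aid (illustrative, not exhaustive): the default orders above are
 * what the tools fall back to when no explicit key list is given, and are
 * equivalent to passing the same string by hand, e.g.:
 *
 *   perf report --sort comm,dso,symbol
 *   perf report --branch-stack   (defaults to comm,dso_from,symbol_from,symbol_to,cycles)
 *
 * The individual key names are resolved through the sort_dimension tables
 * defined later in this file.
 */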

/*
 * Replaces all occurrences of the character used with the:
 *
 * -t, --field-separator
 *
 * option. That option uses a special separator character and doesn't pad
 * with spaces, so all occurrences of this separator in symbol names (and
 * other output) are replaced with a '.' character, making it the only
 * invalid separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		u64 rip = ip;

		if (map && map->dso && map->dso->kernel
		    && map->dso->adjust_symbols)
			rip = map->unmap_ip(map, ip);

		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, rip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};
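/*
 * Reading aid for the IPC column below (a note added for clarity, not part
 * of the original comments): ipc is hit_insn / hit_cycles taken from the
 * symbol's annotation, and the bracketed percentage is the coverage,
 * cover_insn * 100 / total_insn.  So an output of "2.31 [ 42.0%]" means
 * roughly 2.31 instructions per cycle with 42% of the symbol's instructions
 * covered.
 */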

static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;
	struct annotation *notes;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	notes = symbol__annotation(sym);

	if (notes->hit_cycles)
		ipc = notes->hit_insn / ((double)notes->hit_cycles);

	if (notes->total_insn) {
		coverage = notes->cover_insn * 100.0 /
			((double)notes->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_null_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort cgroup */

static int64_t
sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cgroup - left->cgroup;
}

static int hist_entry__cgroup_snprintf(struct hist_entry *he,
				       char *bf, size_t size,
				       unsigned int width __maybe_unused)
{
	const char *cgrp_name = "N/A";

	if (he->cgroup) {
		struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
						   he->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;
		else
			cgrp_name = "unknown";
	}

	return repsep_snprintf(bf, size, "%s", cgrp_name);
}

struct sort_entry sort_cgroup = {
	.se_header = "Cgroup",
	.se_cmp = sort__cgroup_cmp,
	.se_snprintf = hist_entry__cgroup_snprintf,
	.se_width_idx = HISTC_CGROUP,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}

static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header = "Time",
	.se_cmp = sort__time_cmp,
	.se_snprintf = hist_entry__time_snprintf,
	.se_width_idx = HISTC_TIME,
};

/* --sort trace */

#ifdef HAVE_LIBTRACEEVENT
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};
#endif /* HAVE_LIBTRACEEVENT */

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       he->branch_info->from.ms.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       he->branch_info->to.ms.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
						 from->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
						 to->al_level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int _hist_entry__addr_snprintf(struct map_symbol *ms,
				      u64 ip, char level, char *bf, size_t size,
				      unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0, offs;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			offs = ip - sym->start;
			if (offs)
				ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
						  he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l;
	struct addr_map_symbol *from_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(from_l->addr, from_r->addr);
}

static int64_t
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l;
	struct addr_map_symbol *to_r;
	int64_t ret;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
	if (ret != 0)
		return ret;

	return _sort__addr_cmp(to_l->addr, to_r->addr);
}

struct sort_entry sort_addr_from = {
	.se_header = "Source Address",
	.se_cmp = sort__addr_from_cmp,
	.se_snprintf = hist_entry__addr_from_snprintf,
	.se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
	.se_width_idx = HISTC_ADDR_FROM,
};

struct sort_entry sort_addr_to = {
	.se_header = "Target Address",
	.se_cmp = sort__addr_to_cmp,
	.se_snprintf = hist_entry__addr_to_snprintf,
	.se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
	.se_width_idx = HISTC_ADDR_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		ms = &he->mem_info->daddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		ms = &he->mem_info->iaddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.ms.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	int rc;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.ms.map;
	r_map = right->mem_info->daddr.ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	rc = dso__cmp_id(l_map->dso, r_map->dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->dso->id.maj && !l_map->dso->id.min &&
	    !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = he->mem_info->daddr.ms.map;

		addr = cl_address(he->mem_info->daddr.al_addr);
		ms = &he->mem_info->daddr.ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map->prot & PROT_EXEC) &&
		    (map->flags & MAP_SHARED) &&
		    (map->dso->id.maj || map->dso->id.min ||
		     map->dso->id.ino || map->dso->id.ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};
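/*
 * Reading aid for the weight keys below (a note added for clarity):
 * "local_weight" prints the per-entry weight as-is, while the global
 * "weight" key scales it by stat.nr_events, so an entry that aggregated
 * 10 samples of weight 3 reports 3 locally and 30 globally.  The same
 * local/global split is used for ins_lat and p_stage_cyc further down.
 */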

static int64_t
sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->weight - right->weight;
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width,
			       he->weight * he->stat.nr_events);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

static int64_t
sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->ins_lat - right->ins_lat;
}

static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
}

struct sort_entry sort_local_ins_lat = {
	.se_header = "Local INSTR Latency",
	.se_cmp = sort__ins_lat_cmp,
	.se_snprintf = hist_entry__local_ins_lat_snprintf,
	.se_width_idx = HISTC_LOCAL_INS_LAT,
};

static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width,
			       he->ins_lat * he->stat.nr_events);
}

struct sort_entry sort_global_ins_lat = {
	.se_header = "INSTR Latency",
	.se_cmp = sort__ins_lat_cmp,
	.se_snprintf = hist_entry__global_ins_lat_snprintf,
	.se_width_idx = HISTC_GLOBAL_INS_LAT,
};

static int64_t
sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->p_stage_cyc - right->p_stage_cyc;
}

static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
						   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width,
			       he->p_stage_cyc * he->stat.nr_events);
}

static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
}

struct sort_entry sort_local_p_stage_cyc = {
	.se_header = "Local Pipeline Stage Cycle",
	.se_cmp = sort__p_stage_cyc_cmp,
	.se_snprintf = hist_entry__p_stage_cyc_snprintf,
	.se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
};

struct sort_entry sort_global_p_stage_cyc = {
	.se_header = "Pipeline Stage Cycle",
	.se_cmp = sort__p_stage_cyc_cmp,
	.se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
	.se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_blk = PERF_MEM_BLK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_blk = PERF_MEM_BLK_NA;

	return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
}

static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	char out[16];

	perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

struct sort_entry sort_mem_blocked = {
	.se_header = "Blocked",
	.se_cmp = sort__blocked_cmp,
	.se_snprintf = hist_entry__blocked_snprintf,
	.se_width_idx = HISTC_MEM_BLOCKED,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}

static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header = "Data Physical Address",
	.se_cmp = sort__phys_daddr_cmp,
	.se_snprintf = hist_entry__phys_daddr_snprintf,
	.se_width_idx = HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.data_page_size;
	if (right->mem_info)
		r = right->mem_info->daddr.data_page_size;

	return (int64_t)(r - l);
}

static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	char str[PAGE_SIZE_NAME_LEN];

	return repsep_snprintf(bf, size, "%-*s", width,
			       get_page_size_name(he->mem_info->daddr.data_page_size, str));
}

struct sort_entry sort_mem_data_page_size = {
	.se_header = "Data Page Size",
	.se_cmp = sort__data_page_size_cmp,
	.se_snprintf = hist_entry__data_page_size_snprintf,
	.se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
};

static int64_t
sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = left->code_page_size;
	uint64_t r = right->code_page_size;

	return (int64_t)(r - l);
}

static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
					       size_t size, unsigned int width)
{
	char str[PAGE_SIZE_NAME_LEN];

	return repsep_snprintf(bf, size, "%-*s", width,
			       get_page_size_name(he->code_page_size, str));
}

struct sort_entry sort_code_page_size = {
	.se_header = "Code Page Size",
	.se_cmp = sort__code_page_size_cmp,
	.se_snprintf = hist_entry__code_page_size_snprintf,
	.se_width_idx = HISTC_CODE_PAGE_SIZE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}
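/*
 * The txbits table below maps PERF_TXN_* flags to the short tags printed in
 * the Transaction column; entries with skip_for_len set are not counted by
 * hist_entry__transaction_len() when sizing the column.  As an illustration
 * (not exhaustive): a sample with PERF_TXN_TRANSACTION | PERF_TXN_SYNC |
 * PERF_TXN_CAPACITY_WRITE whose abort-code bits decode to 3 renders roughly
 * as "TX SYNC CAP-WRITE :3".
 */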

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header = "Symbol size",
	.se_cmp = sort__sym_size_cmp,
	.se_snprintf = hist_entry__sym_size_snprintf,
	.se_width_idx = HISTC_SYM_SIZE,
};

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header = "DSO size",
	.se_cmp = sort__dso_size_cmp,
	.se_snprintf = hist_entry__dso_size_snprintf,
	.se_width_idx = HISTC_DSO_SIZE,
};

/* --sort addr */

static int64_t
sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 left_ip = left->ip;
	u64 right_ip = right->ip;
	struct map *left_map = left->ms.map;
	struct map *right_map = right->ms.map;

	if (left_map)
		left_ip = left_map->unmap_ip(left_map, left_ip);
	if (right_map)
		right_ip = right_map->unmap_ip(right_map, right_ip);

	return _sort__addr_cmp(left_ip, right_ip);
}

static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	u64 ip = he->ip;
	struct map *map = he->ms.map;

	if (map)
		ip = map->unmap_ip(map, ip);

	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
}

struct sort_entry sort_addr = {
	.se_header = "Address",
	.se_cmp = sort__addr_cmp,
	.se_snprintf = hist_entry__addr_snprintf,
	.se_width_idx = HISTC_ADDR,
};

struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
{
	return 0;
}

const char * __weak arch_perf_header_entry(const char *se_header)
{
	return se_header;
}

static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
{
	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
}

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
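/*
 * The dimension tables below tie the key names users pass via --sort (and
 * the field_order string) to their sort_entry implementations: common keys
 * first, then branch-stack keys (indexed from __SORT_BRANCH_STACK), memory
 * keys (indexed from __SORT_MEMORY_MODE), and finally the hpp output
 * columns such as "overhead".
 */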
/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
	       size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header = "Symbol size",
	.se_cmp = sort__sym_size_cmp,
	.se_snprintf = hist_entry__sym_size_snprintf,
	.se_width_idx = HISTC_SYM_SIZE,
};

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
	       size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header = "DSO size",
	.se_cmp = sort__dso_size_cmp,
	.se_snprintf = hist_entry__dso_size_snprintf,
	.se_width_idx = HISTC_DSO_SIZE,
};

/* --sort addr */

static int64_t
sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 left_ip = left->ip;
	u64 right_ip = right->ip;
	struct map *left_map = left->ms.map;
	struct map *right_map = right->ms.map;

	if (left_map)
		left_ip = left_map->unmap_ip(left_map, left_ip);
	if (right_map)
		right_ip = right_map->unmap_ip(right_map, right_ip);

	return _sort__addr_cmp(left_ip, right_ip);
}

static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	u64 ip = he->ip;
	struct map *map = he->ms.map;

	if (map)
		ip = map->unmap_ip(map, ip);

	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
}

struct sort_entry sort_addr = {
	.se_header = "Address",
	.se_cmp = sort__addr_cmp,
	.se_snprintf = hist_entry__addr_snprintf,
	.se_width_idx = HISTC_ADDR,
};


struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
{
	return 0;
}

const char * __weak arch_perf_header_entry(const char *se_header)
{
	return se_header;
}

static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
{
	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
}

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
#ifdef HAVE_LIBTRACEEVENT
	DIM(SORT_TRACE, "trace", sort_trace),
#endif
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
	DIM(SORT_ADDR, "addr", sort_addr),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)						\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)		\
{									\
	struct hpp_sort_entry *hse;					\
									\
	if (!perf_hpp__is_sort_entry(fmt))				\
		return false;						\
									\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);		\
	return hse->se == &sort_ ## key ;				\
}

#ifdef HAVE_LIBTRACEEVENT
MK_SORT_ENTRY_CHK(trace)
#else
bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
	return false;
}
#endif
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)
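
/*
 * For reference, each MK_SORT_ENTRY_CHK(key) use above expands to a small
 * predicate bound to the corresponding sort_entry, e.g. MK_SORT_ENTRY_CHK(comm)
 * becomes:
 *
 *	bool perf_hpp__is_comm_entry(struct perf_hpp_fmt *fmt)
 *	{
 *		...
 *		return hse->se == &sort_comm;
 *	}
 */
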
static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * The hist entry is filtered if any of the sort keys in the
		 * hpp list applies.  But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}
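
/*
 * Return value convention of hist_entry__filter() above: -1 means no sort
 * key in the list handles this filter type (the entry is left as is), 0
 * means at least one key handled it and none wants the entry filtered, and
 * a positive value means some key asked for the entry to be filtered out.
 */
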
static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

#ifndef HAVE_LIBTRACEEVENT
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
	return false;
}
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
				     struct hists *hists __maybe_unused)
{
	return false;
}
#else
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length needed to print the numbers in hex */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse the pretty-printed result and update the max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}
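
/*
 * The pretty-printed trace output scanned above is typically a space
 * separated list of "name=value" pairs.  For illustration (field names are
 * only examples): with a trace_output of "prev_comm=perf prev_pid=1234",
 * the field "prev_pid" would yield the value "1234" and a dynamic_len of 4.
 */
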
static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;
#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
		if (field->flags & TEP_FIELD_IS_RELATIVE)
			offset += field->offset + field->size;
#endif
		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}
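
/*
 * Note on the TEP_FIELD_IS_DYNAMIC case in __sort__hde_cmp() above: for
 * dynamic arrays the record stores a 32-bit descriptor with the data offset
 * in the low 16 bits and its length in the high 16 bits, hence the
 * "dyn & 0xffff" and "(dyn >> 16) & 0xffff" unpacking.
 */
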
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}
#endif /* HAVE_LIBTRACEEVENT */

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
#ifdef HAVE_LIBTRACEEVENT
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
#endif
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}
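
/*
 * For example (field name chosen only for illustration), a --sort token of
 * "sched:sched_switch.next_pid/raw" is split by parse_field_name() into
 * event "sched:sched_switch", field "next_pid" and option "raw", while a
 * bare "next_pid" leaves the event part NULL so that all tracepoint events
 * are considered.
 */
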
/* find the matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
	struct evsel *evsel = NULL;
	struct evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->core.nr_entries)
			return NULL;

		evsel = evlist__first(evlist);
		while (--nr > 0)
			evsel = evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

#ifdef HAVE_LIBTRACEEVENT
static int __dynamic_dimension__add(struct evsel *evsel,
				    struct tep_format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct tep_format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct evsel *evsel;
	struct tep_format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
#endif /* HAVE_LIBTRACEEVENT */
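
/*
 * add_dynamic_entry() below accepts several token forms on top of the
 * event/field syntax parsed above:
 *
 *   trace_fields      - all fields of all tracepoint events
 *   <field>           - the field in every tracepoint event that defines it
 *   <event>.<field>   - one field of one event
 *   <event>.*         - all fields of one event
 *
 * Appending "/raw" to a token requests the raw, unformatted field value.
 */
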
static int add_dynamic_entry(struct evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct evsel *evsel;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

#ifdef HAVE_LIBTRACEEVENT
	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}
#endif

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

#ifdef HAVE_LIBTRACEEVENT
	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);

		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			/* go through 'out' so that 'str' is not leaked */
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}
#else
	(void)level;
	(void)raw_trace;
#endif /* HAVE_LIBTRACEEVENT */

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
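
/*
 * sort_dimension__add() below matches 'tok' against the dimension tables
 * with strncasecmp(), i.e. a case-insensitive prefix match, so e.g. "sym"
 * selects "symbol".  It typically returns 0 on success, -EINVAL when a key
 * cannot be used in the current sort mode (or the parent regex fails to
 * compile) and -ESRCH when nothing matches.
 */
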
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist,
			int level)
{
	unsigned int i, j;

	/*
	 * Check to see if there are any arch specific
	 * sort dimensions not applicable to the current
	 * architecture. If so, skip that sort key since
	 * we don't want to display it in the output fields.
	 */
	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
		if (!strcmp(arch_specific_sort_keys[j], tok) &&
		    !arch_support_sort_key(tok)) {
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
				sort_dimension_add_dynamic_header(sd);
		}

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files. Those files could come
			 * from different binaries. So we should not compare
			 * their ips, but their symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					ui__error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				ui__error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
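
/*
 * setup_sort_list() above splits the sort order on ',', ' ' and the '{' '}'
 * grouping markers.  Keys wrapped in braces share one hierarchy level, so
 * e.g. --sort 'comm,{dso,symbol}' keeps dso and symbol on the same level
 * (used by the hierarchy view) below comm.
 */
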
static const char *get_default_sort_order(struct evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		ui__error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but never free it,
	 * because it is still used by the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}
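
/*
 * With the '+' prefix handling above, e.g. "--sort +mispredict" in branch
 * mode becomes "comm,dso_from,symbol_from,symbol_to,cycles,mispredict":
 * the key is appended to the default order instead of replacing it.
 */
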
/*
 * Adds the 'pre,' prefix into 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		n = NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
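
/*
 * As an example of the above, a plain "comm,dso" sort key string
 * effectively becomes "overhead,comm,dso", and with children accumulation
 * enabled "overhead_children,overhead,comm,dso" (unless a strict --fields
 * order was given, see __setup_sorting() below).
 */
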
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	case HISTC_ADDR_FROM:
		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
	case HISTC_ADDR_TO:
		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
	default:
		break;
	}

	return false;
}

void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}
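
/*
 * Elision example: when the report is restricted to a single value of a
 * column, e.g. a dso list containing only "libc.so.6" (the name here is
 * purely illustrative), that column carries no information and is elided;
 * a "# dso: libc.so.6" comment line is emitted instead.  If that would hide
 * every sort column, sort__setup_elide() above undoes the elision.
 */
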
int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		ui__error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (!str)
		return;

	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}