1 // SPDX-License-Identifier: GPL-2.0 2 #include <dirent.h> 3 #include <errno.h> 4 #include <inttypes.h> 5 #include <regex.h> 6 #include <stdlib.h> 7 #include "callchain.h" 8 #include "debug.h" 9 #include "dso.h" 10 #include "env.h" 11 #include "event.h" 12 #include "evsel.h" 13 #include "hist.h" 14 #include "machine.h" 15 #include "map.h" 16 #include "map_symbol.h" 17 #include "branch.h" 18 #include "mem-events.h" 19 #include "mem-info.h" 20 #include "path.h" 21 #include "srcline.h" 22 #include "symbol.h" 23 #include "sort.h" 24 #include "strlist.h" 25 #include "target.h" 26 #include "thread.h" 27 #include "util.h" 28 #include "vdso.h" 29 #include <stdbool.h> 30 #include <sys/types.h> 31 #include <sys/stat.h> 32 #include <unistd.h> 33 #include "unwind.h" 34 #include "linux/hash.h" 35 #include "asm/bug.h" 36 #include "bpf-event.h" 37 #include <internal/lib.h> // page_size 38 #include "cgroup.h" 39 #include "arm64-frame-pointer-unwind-support.h" 40 #include <api/io_dir.h> 41 42 #include <linux/ctype.h> 43 #include <symbol/kallsyms.h> 44 #include <linux/mman.h> 45 #include <linux/string.h> 46 #include <linux/zalloc.h> 47 48 static struct dso *machine__kernel_dso(struct machine *machine) 49 { 50 return map__dso(machine->vmlinux_map); 51 } 52 53 static int machine__set_mmap_name(struct machine *machine) 54 { 55 if (machine__is_host(machine)) 56 machine->mmap_name = strdup("[kernel.kallsyms]"); 57 else if (machine__is_default_guest(machine)) 58 machine->mmap_name = strdup("[guest.kernel.kallsyms]"); 59 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]", 60 machine->pid) < 0) 61 machine->mmap_name = NULL; 62 63 return machine->mmap_name ? 0 : -ENOMEM; 64 } 65 66 static void thread__set_guest_comm(struct thread *thread, pid_t pid) 67 { 68 char comm[64]; 69 70 snprintf(comm, sizeof(comm), "[guest/%d]", pid); 71 thread__set_comm(thread, comm, 0); 72 } 73 74 int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 75 { 76 int err = -ENOMEM; 77 78 memset(machine, 0, sizeof(*machine)); 79 machine->kmaps = maps__new(machine); 80 if (machine->kmaps == NULL) 81 return -ENOMEM; 82 83 RB_CLEAR_NODE(&machine->rb_node); 84 dsos__init(&machine->dsos); 85 86 threads__init(&machine->threads); 87 88 machine->vdso_info = NULL; 89 machine->env = NULL; 90 91 machine->pid = pid; 92 93 machine->id_hdr_size = 0; 94 machine->kptr_restrict_warned = false; 95 machine->comm_exec = false; 96 machine->kernel_start = 0; 97 machine->vmlinux_map = NULL; 98 /* There is no initial context switch in, so we start at 1. */ 99 machine->parallelism = 1; 100 101 machine->root_dir = strdup(root_dir); 102 if (machine->root_dir == NULL) 103 goto out; 104 105 if (machine__set_mmap_name(machine)) 106 goto out; 107 108 if (pid != HOST_KERNEL_ID) { 109 struct thread *thread = machine__findnew_thread(machine, -1, 110 pid); 111 112 if (thread == NULL) 113 goto out; 114 115 thread__set_guest_comm(thread, pid); 116 thread__put(thread); 117 } 118 119 machine->current_tid = NULL; 120 err = 0; 121 122 out: 123 if (err) { 124 zfree(&machine->kmaps); 125 zfree(&machine->root_dir); 126 zfree(&machine->mmap_name); 127 } 128 return 0; 129 } 130 131 struct machine *machine__new_host(void) 132 { 133 struct machine *machine = malloc(sizeof(*machine)); 134 135 if (machine != NULL) { 136 machine__init(machine, "", HOST_KERNEL_ID); 137 138 if (machine__create_kernel_maps(machine) < 0) 139 goto out_delete; 140 141 machine->env = &perf_env; 142 } 143 144 return machine; 145 out_delete: 146 free(machine); 147 return NULL; 148 } 149 150 struct machine *machine__new_kallsyms(void) 151 { 152 struct machine *machine = machine__new_host(); 153 /* 154 * FIXME: 155 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly 156 * ask for not using the kcore parsing code, once this one is fixed 157 * to create a map per module. 158 */ 159 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) { 160 machine__delete(machine); 161 machine = NULL; 162 } 163 164 return machine; 165 } 166 167 void machine__delete_threads(struct machine *machine) 168 { 169 threads__remove_all_threads(&machine->threads); 170 } 171 172 void machine__exit(struct machine *machine) 173 { 174 if (machine == NULL) 175 return; 176 177 machine__destroy_kernel_maps(machine); 178 maps__zput(machine->kmaps); 179 dsos__exit(&machine->dsos); 180 machine__exit_vdso(machine); 181 zfree(&machine->root_dir); 182 zfree(&machine->mmap_name); 183 zfree(&machine->current_tid); 184 zfree(&machine->kallsyms_filename); 185 186 threads__exit(&machine->threads); 187 } 188 189 void machine__delete(struct machine *machine) 190 { 191 if (machine) { 192 machine__exit(machine); 193 free(machine); 194 } 195 } 196 197 void machines__init(struct machines *machines) 198 { 199 machine__init(&machines->host, "", HOST_KERNEL_ID); 200 machines->guests = RB_ROOT_CACHED; 201 } 202 203 void machines__exit(struct machines *machines) 204 { 205 machine__exit(&machines->host); 206 /* XXX exit guest */ 207 } 208 209 struct machine *machines__add(struct machines *machines, pid_t pid, 210 const char *root_dir) 211 { 212 struct rb_node **p = &machines->guests.rb_root.rb_node; 213 struct rb_node *parent = NULL; 214 struct machine *pos, *machine = malloc(sizeof(*machine)); 215 bool leftmost = true; 216 217 if (machine == NULL) 218 return NULL; 219 220 if (machine__init(machine, root_dir, pid) != 0) { 221 free(machine); 222 return NULL; 223 } 224 225 while (*p != NULL) { 226 parent = *p; 227 pos = rb_entry(parent, struct machine, rb_node); 228 if (pid < pos->pid) 229 p = &(*p)->rb_left; 230 else { 231 p = &(*p)->rb_right; 232 leftmost = false; 233 } 234 } 235 236 rb_link_node(&machine->rb_node, parent, p); 237 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost); 238 239 machine->machines = machines; 240 241 return machine; 242 } 243 244 void machines__set_comm_exec(struct machines *machines, bool comm_exec) 245 { 246 struct rb_node *nd; 247 248 machines->host.comm_exec = comm_exec; 249 250 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 251 struct machine *machine = rb_entry(nd, struct machine, rb_node); 252 253 machine->comm_exec = comm_exec; 254 } 255 } 256 257 struct machine *machines__find(struct machines *machines, pid_t pid) 258 { 259 struct rb_node **p = &machines->guests.rb_root.rb_node; 260 struct rb_node *parent = NULL; 261 struct machine *machine; 262 struct machine *default_machine = NULL; 263 264 if (pid == HOST_KERNEL_ID) 265 return &machines->host; 266 267 while (*p != NULL) { 268 parent = *p; 269 machine = rb_entry(parent, struct machine, rb_node); 270 if (pid < machine->pid) 271 p = &(*p)->rb_left; 272 else if (pid > machine->pid) 273 p = &(*p)->rb_right; 274 else 275 return machine; 276 if (!machine->pid) 277 default_machine = machine; 278 } 279 280 return default_machine; 281 } 282 283 struct machine *machines__findnew(struct machines *machines, pid_t pid) 284 { 285 char path[PATH_MAX]; 286 const char *root_dir = ""; 287 struct machine *machine = machines__find(machines, pid); 288 289 if (machine && (machine->pid == pid)) 290 goto out; 291 292 if ((pid != HOST_KERNEL_ID) && 293 (pid != DEFAULT_GUEST_KERNEL_ID) && 294 (symbol_conf.guestmount)) { 295 sprintf(path, "%s/%d", symbol_conf.guestmount, pid); 296 if (access(path, R_OK)) { 297 static struct strlist *seen; 298 299 if (!seen) 300 seen = strlist__new(NULL, NULL); 301 302 if (!strlist__has_entry(seen, path)) { 303 pr_err("Can't access file %s\n", path); 304 strlist__add(seen, path); 305 } 306 machine = NULL; 307 goto out; 308 } 309 root_dir = path; 310 } 311 312 machine = machines__add(machines, pid, root_dir); 313 out: 314 return machine; 315 } 316 317 struct machine *machines__find_guest(struct machines *machines, pid_t pid) 318 { 319 struct machine *machine = machines__find(machines, pid); 320 321 if (!machine) 322 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); 323 return machine; 324 } 325 326 /* 327 * A common case for KVM test programs is that the test program acts as the 328 * hypervisor, creating, running and destroying the virtual machine, and 329 * providing the guest object code from its own object code. In this case, 330 * the VM is not running an OS, but only the functions loaded into it by the 331 * hypervisor test program, and conveniently, loaded at the same virtual 332 * addresses. 333 * 334 * Normally to resolve addresses, MMAP events are needed to map addresses 335 * back to the object code and debug symbols for that object code. 336 * 337 * Currently, there is no way to get such mapping information from guests 338 * but, in the scenario described above, the guest has the same mappings 339 * as the hypervisor, so support for that scenario can be achieved. 340 * 341 * To support that, copy the host thread's maps to the guest thread's maps. 342 * Note, we do not discover the guest until we encounter a guest event, 343 * which works well because it is not until then that we know that the host 344 * thread's maps have been set up. 345 * 346 * This function returns the guest thread. Apart from keeping the data 347 * structures sane, using a thread belonging to the guest machine, instead 348 * of the host thread, allows it to have its own comm (refer 349 * thread__set_guest_comm()). 350 */ 351 static struct thread *findnew_guest_code(struct machine *machine, 352 struct machine *host_machine, 353 pid_t pid) 354 { 355 struct thread *host_thread; 356 struct thread *thread; 357 int err; 358 359 if (!machine) 360 return NULL; 361 362 thread = machine__findnew_thread(machine, -1, pid); 363 if (!thread) 364 return NULL; 365 366 /* Assume maps are set up if there are any */ 367 if (!maps__empty(thread__maps(thread))) 368 return thread; 369 370 host_thread = machine__find_thread(host_machine, -1, pid); 371 if (!host_thread) 372 goto out_err; 373 374 thread__set_guest_comm(thread, pid); 375 376 /* 377 * Guest code can be found in hypervisor process at the same address 378 * so copy host maps. 379 */ 380 err = maps__copy_from(thread__maps(thread), thread__maps(host_thread)); 381 thread__put(host_thread); 382 if (err) 383 goto out_err; 384 385 return thread; 386 387 out_err: 388 thread__zput(thread); 389 return NULL; 390 } 391 392 struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid) 393 { 394 struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID); 395 struct machine *machine = machines__findnew(machines, pid); 396 397 return findnew_guest_code(machine, host_machine, pid); 398 } 399 400 struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid) 401 { 402 struct machines *machines = machine->machines; 403 struct machine *host_machine; 404 405 if (!machines) 406 return NULL; 407 408 host_machine = machines__find(machines, HOST_KERNEL_ID); 409 410 return findnew_guest_code(machine, host_machine, pid); 411 } 412 413 void machines__process_guests(struct machines *machines, 414 machine__process_t process, void *data) 415 { 416 struct rb_node *nd; 417 418 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 419 struct machine *pos = rb_entry(nd, struct machine, rb_node); 420 process(pos, data); 421 } 422 } 423 424 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) 425 { 426 struct rb_node *node; 427 struct machine *machine; 428 429 machines->host.id_hdr_size = id_hdr_size; 430 431 for (node = rb_first_cached(&machines->guests); node; 432 node = rb_next(node)) { 433 machine = rb_entry(node, struct machine, rb_node); 434 machine->id_hdr_size = id_hdr_size; 435 } 436 437 return; 438 } 439 440 static void machine__update_thread_pid(struct machine *machine, 441 struct thread *th, pid_t pid) 442 { 443 struct thread *leader; 444 445 if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1) 446 return; 447 448 thread__set_pid(th, pid); 449 450 if (thread__pid(th) == thread__tid(th)) 451 return; 452 453 leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); 454 if (!leader) 455 goto out_err; 456 457 if (!thread__maps(leader)) 458 thread__set_maps(leader, maps__new(machine)); 459 460 if (!thread__maps(leader)) 461 goto out_err; 462 463 if (thread__maps(th) == thread__maps(leader)) 464 goto out_put; 465 466 if (thread__maps(th)) { 467 /* 468 * Maps are created from MMAP events which provide the pid and 469 * tid. Consequently there never should be any maps on a thread 470 * with an unknown pid. Just print an error if there are. 471 */ 472 if (!maps__empty(thread__maps(th))) 473 pr_err("Discarding thread maps for %d:%d\n", 474 thread__pid(th), thread__tid(th)); 475 maps__put(thread__maps(th)); 476 } 477 478 thread__set_maps(th, maps__get(thread__maps(leader))); 479 out_put: 480 thread__put(leader); 481 return; 482 out_err: 483 pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th)); 484 goto out_put; 485 } 486 487 /* 488 * Caller must eventually drop thread->refcnt returned with a successful 489 * lookup/new thread inserted. 490 */ 491 static struct thread *__machine__findnew_thread(struct machine *machine, 492 pid_t pid, 493 pid_t tid, 494 bool create) 495 { 496 struct thread *th = threads__find(&machine->threads, tid); 497 bool created; 498 499 if (th) { 500 machine__update_thread_pid(machine, th, pid); 501 return th; 502 } 503 if (!create) 504 return NULL; 505 506 th = threads__findnew(&machine->threads, pid, tid, &created); 507 if (created) { 508 /* 509 * We have to initialize maps separately after rb tree is 510 * updated. 511 * 512 * The reason is that we call machine__findnew_thread within 513 * thread__init_maps to find the thread leader and that would 514 * screwed the rb tree. 515 */ 516 if (thread__init_maps(th, machine)) { 517 pr_err("Thread init failed thread %d\n", pid); 518 threads__remove(&machine->threads, th); 519 thread__put(th); 520 return NULL; 521 } 522 } else 523 machine__update_thread_pid(machine, th, pid); 524 525 return th; 526 } 527 528 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) 529 { 530 return __machine__findnew_thread(machine, pid, tid, /*create=*/true); 531 } 532 533 struct thread *machine__find_thread(struct machine *machine, pid_t pid, 534 pid_t tid) 535 { 536 return __machine__findnew_thread(machine, pid, tid, /*create=*/false); 537 } 538 539 /* 540 * Threads are identified by pid and tid, and the idle task has pid == tid == 0. 541 * So here a single thread is created for that, but actually there is a separate 542 * idle task per cpu, so there should be one 'struct thread' per cpu, but there 543 * is only 1. That causes problems for some tools, requiring workarounds. For 544 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu(). 545 */ 546 struct thread *machine__idle_thread(struct machine *machine) 547 { 548 struct thread *thread = machine__findnew_thread(machine, 0, 0); 549 550 if (!thread || thread__set_comm(thread, "swapper", 0) || 551 thread__set_namespaces(thread, 0, NULL)) 552 pr_err("problem inserting idle task for machine pid %d\n", machine->pid); 553 554 return thread; 555 } 556 557 struct comm *machine__thread_exec_comm(struct machine *machine, 558 struct thread *thread) 559 { 560 if (machine->comm_exec) 561 return thread__exec_comm(thread); 562 else 563 return thread__comm(thread); 564 } 565 566 int machine__process_comm_event(struct machine *machine, union perf_event *event, 567 struct perf_sample *sample) 568 { 569 struct thread *thread = machine__findnew_thread(machine, 570 event->comm.pid, 571 event->comm.tid); 572 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; 573 int err = 0; 574 575 if (exec) 576 machine->comm_exec = true; 577 578 if (dump_trace) 579 perf_event__fprintf_comm(event, stdout); 580 581 if (thread == NULL || 582 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { 583 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 584 err = -1; 585 } 586 587 thread__put(thread); 588 589 return err; 590 } 591 592 int machine__process_namespaces_event(struct machine *machine __maybe_unused, 593 union perf_event *event, 594 struct perf_sample *sample __maybe_unused) 595 { 596 struct thread *thread = machine__findnew_thread(machine, 597 event->namespaces.pid, 598 event->namespaces.tid); 599 int err = 0; 600 601 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES, 602 "\nWARNING: kernel seems to support more namespaces than perf" 603 " tool.\nTry updating the perf tool..\n\n"); 604 605 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES, 606 "\nWARNING: perf tool seems to support more namespaces than" 607 " the kernel.\nTry updating the kernel..\n\n"); 608 609 if (dump_trace) 610 perf_event__fprintf_namespaces(event, stdout); 611 612 if (thread == NULL || 613 thread__set_namespaces(thread, sample->time, &event->namespaces)) { 614 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n"); 615 err = -1; 616 } 617 618 thread__put(thread); 619 620 return err; 621 } 622 623 int machine__process_cgroup_event(struct machine *machine, 624 union perf_event *event, 625 struct perf_sample *sample __maybe_unused) 626 { 627 struct cgroup *cgrp; 628 629 if (dump_trace) 630 perf_event__fprintf_cgroup(event, stdout); 631 632 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path); 633 if (cgrp == NULL) 634 return -ENOMEM; 635 636 return 0; 637 } 638 639 int machine__process_lost_event(struct machine *machine __maybe_unused, 640 union perf_event *event, struct perf_sample *sample __maybe_unused) 641 { 642 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n", 643 event->lost.id, event->lost.lost); 644 return 0; 645 } 646 647 int machine__process_lost_samples_event(struct machine *machine __maybe_unused, 648 union perf_event *event, struct perf_sample *sample) 649 { 650 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "%s\n", 651 sample->id, event->lost_samples.lost, 652 event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : ""); 653 return 0; 654 } 655 656 int machine__process_aux_event(struct machine *machine __maybe_unused, 657 union perf_event *event) 658 { 659 if (dump_trace) 660 perf_event__fprintf_aux(event, stdout); 661 return 0; 662 } 663 664 int machine__process_itrace_start_event(struct machine *machine __maybe_unused, 665 union perf_event *event) 666 { 667 if (dump_trace) 668 perf_event__fprintf_itrace_start(event, stdout); 669 return 0; 670 } 671 672 int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused, 673 union perf_event *event) 674 { 675 if (dump_trace) 676 perf_event__fprintf_aux_output_hw_id(event, stdout); 677 return 0; 678 } 679 680 int machine__process_switch_event(struct machine *machine __maybe_unused, 681 union perf_event *event) 682 { 683 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 684 685 if (dump_trace) 686 perf_event__fprintf_switch(event, stdout); 687 machine->parallelism += out ? -1 : 1; 688 return 0; 689 } 690 691 static int machine__process_ksymbol_register(struct machine *machine, 692 union perf_event *event, 693 struct perf_sample *sample __maybe_unused) 694 { 695 struct symbol *sym; 696 struct dso *dso = NULL; 697 struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); 698 int err = 0; 699 700 if (!map) { 701 dso = dso__new(event->ksymbol.name); 702 703 if (!dso) { 704 err = -ENOMEM; 705 goto out; 706 } 707 dso__set_kernel(dso, DSO_SPACE__KERNEL); 708 map = map__new2(0, dso); 709 if (!map) { 710 err = -ENOMEM; 711 goto out; 712 } 713 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) { 714 dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL); 715 dso__data(dso)->file_size = event->ksymbol.len; 716 dso__set_loaded(dso); 717 } 718 719 map__set_start(map, event->ksymbol.addr); 720 map__set_end(map, map__start(map) + event->ksymbol.len); 721 err = maps__fixup_overlap_and_insert(machine__kernel_maps(machine), map); 722 if (err) { 723 err = -ENOMEM; 724 goto out; 725 } 726 727 dso__set_loaded(dso); 728 729 if (is_bpf_image(event->ksymbol.name)) { 730 dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE); 731 dso__set_long_name(dso, "", false); 732 } 733 } else { 734 dso = dso__get(map__dso(map)); 735 } 736 737 sym = symbol__new(map__map_ip(map, map__start(map)), 738 event->ksymbol.len, 739 0, 0, event->ksymbol.name); 740 if (!sym) { 741 err = -ENOMEM; 742 goto out; 743 } 744 dso__insert_symbol(dso, sym); 745 out: 746 map__put(map); 747 dso__put(dso); 748 return err; 749 } 750 751 static int machine__process_ksymbol_unregister(struct machine *machine, 752 union perf_event *event, 753 struct perf_sample *sample __maybe_unused) 754 { 755 struct symbol *sym; 756 struct map *map; 757 758 map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); 759 if (!map) 760 return 0; 761 762 if (!RC_CHK_EQUAL(map, machine->vmlinux_map)) 763 maps__remove(machine__kernel_maps(machine), map); 764 else { 765 struct dso *dso = map__dso(map); 766 767 sym = dso__find_symbol(dso, map__map_ip(map, map__start(map))); 768 if (sym) 769 dso__delete_symbol(dso, sym); 770 } 771 map__put(map); 772 return 0; 773 } 774 775 int machine__process_ksymbol(struct machine *machine __maybe_unused, 776 union perf_event *event, 777 struct perf_sample *sample) 778 { 779 if (dump_trace) 780 perf_event__fprintf_ksymbol(event, stdout); 781 782 /* no need to process non-JIT BPF as it cannot get samples */ 783 if (event->ksymbol.len == 0) 784 return 0; 785 786 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER) 787 return machine__process_ksymbol_unregister(machine, event, 788 sample); 789 return machine__process_ksymbol_register(machine, event, sample); 790 } 791 792 int machine__process_text_poke(struct machine *machine, union perf_event *event, 793 struct perf_sample *sample __maybe_unused) 794 { 795 struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr); 796 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 797 struct dso *dso = map ? map__dso(map) : NULL; 798 799 if (dump_trace) 800 perf_event__fprintf_text_poke(event, machine, stdout); 801 802 if (!event->text_poke.new_len) 803 goto out; 804 805 if (cpumode != PERF_RECORD_MISC_KERNEL) { 806 pr_debug("%s: unsupported cpumode - ignoring\n", __func__); 807 goto out; 808 } 809 810 if (dso) { 811 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len; 812 int ret; 813 814 /* 815 * Kernel maps might be changed when loading symbols so loading 816 * must be done prior to using kernel maps. 817 */ 818 map__load(map); 819 ret = dso__data_write_cache_addr(dso, map, machine, 820 event->text_poke.addr, 821 new_bytes, 822 event->text_poke.new_len); 823 if (ret != event->text_poke.new_len) 824 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n", 825 event->text_poke.addr); 826 } else { 827 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n", 828 event->text_poke.addr); 829 } 830 out: 831 map__put(map); 832 return 0; 833 } 834 835 static struct map *machine__addnew_module_map(struct machine *machine, u64 start, 836 const char *filename) 837 { 838 struct map *map = NULL; 839 struct kmod_path m; 840 struct dso *dso; 841 int err; 842 843 if (kmod_path__parse_name(&m, filename)) 844 return NULL; 845 846 dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename); 847 if (dso == NULL) 848 goto out; 849 850 map = map__new2(start, dso); 851 if (map == NULL) 852 goto out; 853 854 err = maps__insert(machine__kernel_maps(machine), map); 855 /* If maps__insert failed, return NULL. */ 856 if (err) { 857 map__put(map); 858 map = NULL; 859 } 860 out: 861 /* put the dso here, corresponding to machine__findnew_module_dso */ 862 dso__put(dso); 863 zfree(&m.name); 864 return map; 865 } 866 867 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) 868 { 869 struct rb_node *nd; 870 size_t ret = dsos__fprintf(&machines->host.dsos, fp); 871 872 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 873 struct machine *pos = rb_entry(nd, struct machine, rb_node); 874 ret += dsos__fprintf(&pos->dsos, fp); 875 } 876 877 return ret; 878 } 879 880 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp, 881 bool (skip)(struct dso *dso, int parm), int parm) 882 { 883 return dsos__fprintf_buildid(&m->dsos, fp, skip, parm); 884 } 885 886 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, 887 bool (skip)(struct dso *dso, int parm), int parm) 888 { 889 struct rb_node *nd; 890 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); 891 892 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 893 struct machine *pos = rb_entry(nd, struct machine, rb_node); 894 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); 895 } 896 return ret; 897 } 898 899 struct machine_fprintf_cb_args { 900 FILE *fp; 901 size_t printed; 902 }; 903 904 static int machine_fprintf_cb(struct thread *thread, void *data) 905 { 906 struct machine_fprintf_cb_args *args = data; 907 908 /* TODO: handle fprintf errors. */ 909 args->printed += thread__fprintf(thread, args->fp); 910 return 0; 911 } 912 913 size_t machine__fprintf(struct machine *machine, FILE *fp) 914 { 915 struct machine_fprintf_cb_args args = { 916 .fp = fp, 917 .printed = 0, 918 }; 919 size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads)); 920 921 machine__for_each_thread(machine, machine_fprintf_cb, &args); 922 return ret + args.printed; 923 } 924 925 static struct dso *machine__get_kernel(struct machine *machine) 926 { 927 const char *vmlinux_name = machine->mmap_name; 928 struct dso *kernel; 929 930 if (machine__is_host(machine)) { 931 if (symbol_conf.vmlinux_name) 932 vmlinux_name = symbol_conf.vmlinux_name; 933 934 kernel = machine__findnew_kernel(machine, vmlinux_name, 935 "[kernel]", DSO_SPACE__KERNEL); 936 } else { 937 if (symbol_conf.default_guest_vmlinux_name) 938 vmlinux_name = symbol_conf.default_guest_vmlinux_name; 939 940 kernel = machine__findnew_kernel(machine, vmlinux_name, 941 "[guest.kernel]", 942 DSO_SPACE__KERNEL_GUEST); 943 } 944 945 if (kernel != NULL && (!dso__has_build_id(kernel))) 946 dso__read_running_kernel_build_id(kernel, machine); 947 948 return kernel; 949 } 950 951 void machine__get_kallsyms_filename(struct machine *machine, char *buf, 952 size_t bufsz) 953 { 954 if (machine__is_default_guest(machine)) 955 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); 956 else 957 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 958 } 959 960 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; 961 962 /* Figure out the start address of kernel map from /proc/kallsyms. 963 * Returns the name of the start symbol in *symbol_name. Pass in NULL as 964 * symbol_name if it's not that important. 965 */ 966 static int machine__get_running_kernel_start(struct machine *machine, 967 const char **symbol_name, 968 u64 *start, u64 *end) 969 { 970 char filename[PATH_MAX]; 971 int i, err = -1; 972 const char *name; 973 u64 addr = 0; 974 975 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 976 977 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 978 return 0; 979 980 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { 981 err = kallsyms__get_function_start(filename, name, &addr); 982 if (!err) 983 break; 984 } 985 986 if (err) 987 return -1; 988 989 if (symbol_name) 990 *symbol_name = name; 991 992 *start = addr; 993 994 err = kallsyms__get_symbol_start(filename, "_edata", &addr); 995 if (err) 996 err = kallsyms__get_symbol_start(filename, "_etext", &addr); 997 if (!err) 998 *end = addr; 999 1000 return 0; 1001 } 1002 1003 int machine__create_extra_kernel_map(struct machine *machine, 1004 struct dso *kernel, 1005 struct extra_kernel_map *xm) 1006 { 1007 struct kmap *kmap; 1008 struct map *map; 1009 int err; 1010 1011 map = map__new2(xm->start, kernel); 1012 if (!map) 1013 return -ENOMEM; 1014 1015 map__set_end(map, xm->end); 1016 map__set_pgoff(map, xm->pgoff); 1017 1018 kmap = map__kmap(map); 1019 1020 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); 1021 1022 err = maps__insert(machine__kernel_maps(machine), map); 1023 1024 if (!err) { 1025 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n", 1026 kmap->name, map__start(map), map__end(map)); 1027 } 1028 1029 map__put(map); 1030 1031 return err; 1032 } 1033 1034 static u64 find_entry_trampoline(struct dso *dso) 1035 { 1036 /* Duplicates are removed so lookup all aliases */ 1037 const char *syms[] = { 1038 "_entry_trampoline", 1039 "__entry_trampoline_start", 1040 "entry_SYSCALL_64_trampoline", 1041 }; 1042 struct symbol *sym = dso__first_symbol(dso); 1043 unsigned int i; 1044 1045 for (; sym; sym = dso__next_symbol(sym)) { 1046 if (sym->binding != STB_GLOBAL) 1047 continue; 1048 for (i = 0; i < ARRAY_SIZE(syms); i++) { 1049 if (!strcmp(sym->name, syms[i])) 1050 return sym->start; 1051 } 1052 } 1053 1054 return 0; 1055 } 1056 1057 /* 1058 * These values can be used for kernels that do not have symbols for the entry 1059 * trampolines in kallsyms. 1060 */ 1061 #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL 1062 #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000 1063 #define X86_64_ENTRY_TRAMPOLINE 0x6000 1064 1065 struct machine__map_x86_64_entry_trampolines_args { 1066 struct maps *kmaps; 1067 bool found; 1068 }; 1069 1070 static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data) 1071 { 1072 struct machine__map_x86_64_entry_trampolines_args *args = data; 1073 struct map *dest_map; 1074 struct kmap *kmap = __map__kmap(map); 1075 1076 if (!kmap || !is_entry_trampoline(kmap->name)) 1077 return 0; 1078 1079 dest_map = maps__find(args->kmaps, map__pgoff(map)); 1080 if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map)) 1081 map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map))); 1082 1083 map__put(dest_map); 1084 args->found = true; 1085 return 0; 1086 } 1087 1088 /* Map x86_64 PTI entry trampolines */ 1089 int machine__map_x86_64_entry_trampolines(struct machine *machine, 1090 struct dso *kernel) 1091 { 1092 struct machine__map_x86_64_entry_trampolines_args args = { 1093 .kmaps = machine__kernel_maps(machine), 1094 .found = false, 1095 }; 1096 int nr_cpus_avail, cpu; 1097 u64 pgoff; 1098 1099 /* 1100 * In the vmlinux case, pgoff is a virtual address which must now be 1101 * mapped to a vmlinux offset. 1102 */ 1103 maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args); 1104 1105 if (args.found || machine->trampolines_mapped) 1106 return 0; 1107 1108 pgoff = find_entry_trampoline(kernel); 1109 if (!pgoff) 1110 return 0; 1111 1112 nr_cpus_avail = machine__nr_cpus_avail(machine); 1113 1114 /* Add a 1 page map for each CPU's entry trampoline */ 1115 for (cpu = 0; cpu < nr_cpus_avail; cpu++) { 1116 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU + 1117 cpu * X86_64_CPU_ENTRY_AREA_SIZE + 1118 X86_64_ENTRY_TRAMPOLINE; 1119 struct extra_kernel_map xm = { 1120 .start = va, 1121 .end = va + page_size, 1122 .pgoff = pgoff, 1123 }; 1124 1125 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN); 1126 1127 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0) 1128 return -1; 1129 } 1130 1131 machine->trampolines_mapped = nr_cpus_avail; 1132 1133 return 0; 1134 } 1135 1136 int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused, 1137 struct dso *kernel __maybe_unused) 1138 { 1139 return 0; 1140 } 1141 1142 static int 1143 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 1144 { 1145 /* In case of renewal the kernel map, destroy previous one */ 1146 machine__destroy_kernel_maps(machine); 1147 1148 map__put(machine->vmlinux_map); 1149 machine->vmlinux_map = map__new2(0, kernel); 1150 if (machine->vmlinux_map == NULL) 1151 return -ENOMEM; 1152 1153 map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY); 1154 return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map); 1155 } 1156 1157 void machine__destroy_kernel_maps(struct machine *machine) 1158 { 1159 struct kmap *kmap; 1160 struct map *map = machine__kernel_map(machine); 1161 1162 if (map == NULL) 1163 return; 1164 1165 kmap = map__kmap(map); 1166 maps__remove(machine__kernel_maps(machine), map); 1167 if (kmap && kmap->ref_reloc_sym) { 1168 zfree((char **)&kmap->ref_reloc_sym->name); 1169 zfree(&kmap->ref_reloc_sym); 1170 } 1171 1172 map__zput(machine->vmlinux_map); 1173 } 1174 1175 int machines__create_guest_kernel_maps(struct machines *machines) 1176 { 1177 int ret = 0; 1178 struct dirent **namelist = NULL; 1179 int i, items = 0; 1180 char path[PATH_MAX]; 1181 pid_t pid; 1182 char *endp; 1183 1184 if (symbol_conf.default_guest_vmlinux_name || 1185 symbol_conf.default_guest_modules || 1186 symbol_conf.default_guest_kallsyms) { 1187 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); 1188 } 1189 1190 if (symbol_conf.guestmount) { 1191 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); 1192 if (items <= 0) 1193 return -ENOENT; 1194 for (i = 0; i < items; i++) { 1195 if (!isdigit(namelist[i]->d_name[0])) { 1196 /* Filter out . and .. */ 1197 continue; 1198 } 1199 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); 1200 if ((*endp != '\0') || 1201 (endp == namelist[i]->d_name) || 1202 (errno == ERANGE)) { 1203 pr_debug("invalid directory (%s). Skipping.\n", 1204 namelist[i]->d_name); 1205 continue; 1206 } 1207 sprintf(path, "%s/%s/proc/kallsyms", 1208 symbol_conf.guestmount, 1209 namelist[i]->d_name); 1210 ret = access(path, R_OK); 1211 if (ret) { 1212 pr_debug("Can't access file %s\n", path); 1213 goto failure; 1214 } 1215 machines__create_kernel_maps(machines, pid); 1216 } 1217 failure: 1218 free(namelist); 1219 } 1220 1221 return ret; 1222 } 1223 1224 void machines__destroy_kernel_maps(struct machines *machines) 1225 { 1226 struct rb_node *next = rb_first_cached(&machines->guests); 1227 1228 machine__destroy_kernel_maps(&machines->host); 1229 1230 while (next) { 1231 struct machine *pos = rb_entry(next, struct machine, rb_node); 1232 1233 next = rb_next(&pos->rb_node); 1234 rb_erase_cached(&pos->rb_node, &machines->guests); 1235 machine__delete(pos); 1236 } 1237 } 1238 1239 int machines__create_kernel_maps(struct machines *machines, pid_t pid) 1240 { 1241 struct machine *machine = machines__findnew(machines, pid); 1242 1243 if (machine == NULL) 1244 return -1; 1245 1246 return machine__create_kernel_maps(machine); 1247 } 1248 1249 int machine__load_kallsyms(struct machine *machine, const char *filename) 1250 { 1251 struct map *map = machine__kernel_map(machine); 1252 struct dso *dso = map__dso(map); 1253 int ret = __dso__load_kallsyms(dso, filename, map, true); 1254 1255 if (ret > 0) { 1256 dso__set_loaded(dso); 1257 /* 1258 * Since /proc/kallsyms will have multiple sessions for the 1259 * kernel, with modules between them, fixup the end of all 1260 * sections. 1261 */ 1262 maps__fixup_end(machine__kernel_maps(machine)); 1263 } 1264 1265 return ret; 1266 } 1267 1268 int machine__load_vmlinux_path(struct machine *machine) 1269 { 1270 struct map *map = machine__kernel_map(machine); 1271 struct dso *dso = map__dso(map); 1272 int ret = dso__load_vmlinux_path(dso, map); 1273 1274 if (ret > 0) 1275 dso__set_loaded(dso); 1276 1277 return ret; 1278 } 1279 1280 static char *get_kernel_version(const char *root_dir) 1281 { 1282 char version[PATH_MAX]; 1283 FILE *file; 1284 char *name, *tmp; 1285 const char *prefix = "Linux version "; 1286 1287 sprintf(version, "%s/proc/version", root_dir); 1288 file = fopen(version, "r"); 1289 if (!file) 1290 return NULL; 1291 1292 tmp = fgets(version, sizeof(version), file); 1293 fclose(file); 1294 if (!tmp) 1295 return NULL; 1296 1297 name = strstr(version, prefix); 1298 if (!name) 1299 return NULL; 1300 name += strlen(prefix); 1301 tmp = strchr(name, ' '); 1302 if (tmp) 1303 *tmp = '\0'; 1304 1305 return strdup(name); 1306 } 1307 1308 static bool is_kmod_dso(struct dso *dso) 1309 { 1310 return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 1311 dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE; 1312 } 1313 1314 static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m) 1315 { 1316 char *long_name; 1317 struct dso *dso; 1318 struct map *map = maps__find_by_name(maps, m->name); 1319 1320 if (map == NULL) 1321 return 0; 1322 1323 long_name = strdup(path); 1324 if (long_name == NULL) { 1325 map__put(map); 1326 return -ENOMEM; 1327 } 1328 1329 dso = map__dso(map); 1330 dso__set_long_name(dso, long_name, true); 1331 dso__kernel_module_get_build_id(dso, ""); 1332 1333 /* 1334 * Full name could reveal us kmod compression, so 1335 * we need to update the symtab_type if needed. 1336 */ 1337 if (m->comp && is_kmod_dso(dso)) { 1338 dso__set_symtab_type(dso, dso__symtab_type(dso)+1); 1339 dso__set_comp(dso, m->comp); 1340 } 1341 map__put(map); 1342 return 0; 1343 } 1344 1345 static int maps__set_modules_path_dir(struct maps *maps, char *path, size_t path_size, int depth) 1346 { 1347 struct io_dirent64 *dent; 1348 struct io_dir iod; 1349 size_t root_len = strlen(path); 1350 int ret = 0; 1351 1352 io_dir__init(&iod, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY)); 1353 if (iod.dirfd < 0) { 1354 pr_debug("%s: cannot open %s dir\n", __func__, path); 1355 return -1; 1356 } 1357 /* Bounds check, should never happen. */ 1358 if (root_len >= path_size) 1359 return -1; 1360 path[root_len++] = '/'; 1361 while ((dent = io_dir__readdir(&iod)) != NULL) { 1362 if (io_dir__is_dir(&iod, dent)) { 1363 if (!strcmp(dent->d_name, ".") || 1364 !strcmp(dent->d_name, "..")) 1365 continue; 1366 1367 /* Do not follow top-level source and build symlinks */ 1368 if (depth == 0) { 1369 if (!strcmp(dent->d_name, "source") || 1370 !strcmp(dent->d_name, "build")) 1371 continue; 1372 } 1373 1374 /* Bounds check, should never happen. */ 1375 if (root_len + strlen(dent->d_name) >= path_size) 1376 continue; 1377 1378 strcpy(path + root_len, dent->d_name); 1379 ret = maps__set_modules_path_dir(maps, path, path_size, depth + 1); 1380 if (ret < 0) 1381 goto out; 1382 } else { 1383 struct kmod_path m; 1384 1385 ret = kmod_path__parse_name(&m, dent->d_name); 1386 if (ret) 1387 goto out; 1388 1389 if (m.kmod) { 1390 /* Bounds check, should never happen. */ 1391 if (root_len + strlen(dent->d_name) < path_size) { 1392 strcpy(path + root_len, dent->d_name); 1393 ret = maps__set_module_path(maps, path, &m); 1394 1395 } 1396 } 1397 zfree(&m.name); 1398 1399 if (ret) 1400 goto out; 1401 } 1402 } 1403 1404 out: 1405 close(iod.dirfd); 1406 return ret; 1407 } 1408 1409 static int machine__set_modules_path(struct machine *machine) 1410 { 1411 char *version; 1412 char modules_path[PATH_MAX]; 1413 1414 version = get_kernel_version(machine->root_dir); 1415 if (!version) 1416 return -1; 1417 1418 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", 1419 machine->root_dir, version); 1420 free(version); 1421 1422 return maps__set_modules_path_dir(machine__kernel_maps(machine), 1423 modules_path, sizeof(modules_path), 0); 1424 } 1425 int __weak arch__fix_module_text_start(u64 *start __maybe_unused, 1426 u64 *size __maybe_unused, 1427 const char *name __maybe_unused) 1428 { 1429 return 0; 1430 } 1431 1432 static int machine__create_module(void *arg, const char *name, u64 start, 1433 u64 size) 1434 { 1435 struct machine *machine = arg; 1436 struct map *map; 1437 1438 if (arch__fix_module_text_start(&start, &size, name) < 0) 1439 return -1; 1440 1441 map = machine__addnew_module_map(machine, start, name); 1442 if (map == NULL) 1443 return -1; 1444 map__set_end(map, start + size); 1445 1446 dso__kernel_module_get_build_id(map__dso(map), machine->root_dir); 1447 map__put(map); 1448 return 0; 1449 } 1450 1451 static int machine__create_modules(struct machine *machine) 1452 { 1453 const char *modules; 1454 char path[PATH_MAX]; 1455 1456 if (machine__is_default_guest(machine)) { 1457 modules = symbol_conf.default_guest_modules; 1458 } else { 1459 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir); 1460 modules = path; 1461 } 1462 1463 if (symbol__restricted_filename(modules, "/proc/modules")) 1464 return -1; 1465 1466 if (modules__parse(modules, machine, machine__create_module)) 1467 return -1; 1468 1469 if (!machine__set_modules_path(machine)) 1470 return 0; 1471 1472 pr_debug("Problems setting modules path maps, continuing anyway...\n"); 1473 1474 return 0; 1475 } 1476 1477 static void machine__set_kernel_mmap(struct machine *machine, 1478 u64 start, u64 end) 1479 { 1480 map__set_start(machine->vmlinux_map, start); 1481 map__set_end(machine->vmlinux_map, end); 1482 /* 1483 * Be a bit paranoid here, some perf.data file came with 1484 * a zero sized synthesized MMAP event for the kernel. 1485 */ 1486 if (start == 0 && end == 0) 1487 map__set_end(machine->vmlinux_map, ~0ULL); 1488 } 1489 1490 static int machine__update_kernel_mmap(struct machine *machine, 1491 u64 start, u64 end) 1492 { 1493 struct map *orig, *updated; 1494 int err; 1495 1496 orig = machine->vmlinux_map; 1497 updated = map__get(orig); 1498 1499 machine->vmlinux_map = updated; 1500 maps__remove(machine__kernel_maps(machine), orig); 1501 machine__set_kernel_mmap(machine, start, end); 1502 err = maps__insert(machine__kernel_maps(machine), updated); 1503 map__put(orig); 1504 1505 return err; 1506 } 1507 1508 int machine__create_kernel_maps(struct machine *machine) 1509 { 1510 struct dso *kernel = machine__get_kernel(machine); 1511 const char *name = NULL; 1512 u64 start = 0, end = ~0ULL; 1513 int ret; 1514 1515 if (kernel == NULL) 1516 return -1; 1517 1518 ret = __machine__create_kernel_maps(machine, kernel); 1519 if (ret < 0) 1520 goto out_put; 1521 1522 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { 1523 if (machine__is_host(machine)) 1524 pr_debug("Problems creating module maps, " 1525 "continuing anyway...\n"); 1526 else 1527 pr_debug("Problems creating module maps for guest %d, " 1528 "continuing anyway...\n", machine->pid); 1529 } 1530 1531 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) { 1532 if (name && 1533 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) { 1534 machine__destroy_kernel_maps(machine); 1535 ret = -1; 1536 goto out_put; 1537 } 1538 1539 /* 1540 * we have a real start address now, so re-order the kmaps 1541 * assume it's the last in the kmaps 1542 */ 1543 ret = machine__update_kernel_mmap(machine, start, end); 1544 if (ret < 0) 1545 goto out_put; 1546 } 1547 1548 if (machine__create_extra_kernel_maps(machine, kernel)) 1549 pr_debug("Problems creating extra kernel maps, continuing anyway...\n"); 1550 1551 if (end == ~0ULL) { 1552 /* update end address of the kernel map using adjacent module address */ 1553 struct map *next = maps__find_next_entry(machine__kernel_maps(machine), 1554 machine__kernel_map(machine)); 1555 1556 if (next) { 1557 machine__set_kernel_mmap(machine, start, map__start(next)); 1558 map__put(next); 1559 } 1560 } 1561 1562 maps__fixup_end(machine__kernel_maps(machine)); 1563 1564 out_put: 1565 dso__put(kernel); 1566 return ret; 1567 } 1568 1569 static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused) 1570 { 1571 return dso__is_kcore(dso) ? 1 : 0; 1572 } 1573 1574 static bool machine__uses_kcore(struct machine *machine) 1575 { 1576 return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? true : false; 1577 } 1578 1579 static bool perf_event__is_extra_kernel_mmap(struct machine *machine, 1580 struct extra_kernel_map *xm) 1581 { 1582 return machine__is(machine, "x86_64") && 1583 is_entry_trampoline(xm->name); 1584 } 1585 1586 static int machine__process_extra_kernel_map(struct machine *machine, 1587 struct extra_kernel_map *xm) 1588 { 1589 struct dso *kernel = machine__kernel_dso(machine); 1590 1591 if (kernel == NULL) 1592 return -1; 1593 1594 return machine__create_extra_kernel_map(machine, kernel, xm); 1595 } 1596 1597 static int machine__process_kernel_mmap_event(struct machine *machine, 1598 struct extra_kernel_map *xm, 1599 struct build_id *bid) 1600 { 1601 enum dso_space_type dso_space; 1602 bool is_kernel_mmap; 1603 const char *mmap_name = machine->mmap_name; 1604 1605 /* If we have maps from kcore then we do not need or want any others */ 1606 if (machine__uses_kcore(machine)) 1607 return 0; 1608 1609 if (machine__is_host(machine)) 1610 dso_space = DSO_SPACE__KERNEL; 1611 else 1612 dso_space = DSO_SPACE__KERNEL_GUEST; 1613 1614 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; 1615 if (!is_kernel_mmap && !machine__is_host(machine)) { 1616 /* 1617 * If the event was recorded inside the guest and injected into 1618 * the host perf.data file, then it will match a host mmap_name, 1619 * so try that - see machine__set_mmap_name(). 1620 */ 1621 mmap_name = "[kernel.kallsyms]"; 1622 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; 1623 } 1624 if (xm->name[0] == '/' || 1625 (!is_kernel_mmap && xm->name[0] == '[')) { 1626 struct map *map = machine__addnew_module_map(machine, xm->start, xm->name); 1627 1628 if (map == NULL) 1629 goto out_problem; 1630 1631 map__set_end(map, map__start(map) + xm->end - xm->start); 1632 1633 if (build_id__is_defined(bid)) 1634 dso__set_build_id(map__dso(map), bid); 1635 1636 map__put(map); 1637 } else if (is_kernel_mmap) { 1638 const char *symbol_name = xm->name + strlen(mmap_name); 1639 /* 1640 * Should be there already, from the build-id table in 1641 * the header. 1642 */ 1643 struct dso *kernel = dsos__find_kernel_dso(&machine->dsos); 1644 1645 if (kernel == NULL) 1646 kernel = machine__findnew_dso(machine, machine->mmap_name); 1647 if (kernel == NULL) 1648 goto out_problem; 1649 1650 dso__set_kernel(kernel, dso_space); 1651 if (__machine__create_kernel_maps(machine, kernel) < 0) { 1652 dso__put(kernel); 1653 goto out_problem; 1654 } 1655 1656 if (strstr(dso__long_name(kernel), "vmlinux")) 1657 dso__set_short_name(kernel, "[kernel.vmlinux]", false); 1658 1659 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) { 1660 dso__put(kernel); 1661 goto out_problem; 1662 } 1663 1664 if (build_id__is_defined(bid)) 1665 dso__set_build_id(kernel, bid); 1666 1667 /* 1668 * Avoid using a zero address (kptr_restrict) for the ref reloc 1669 * symbol. Effectively having zero here means that at record 1670 * time /proc/sys/kernel/kptr_restrict was non zero. 1671 */ 1672 if (xm->pgoff != 0) { 1673 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, 1674 symbol_name, 1675 xm->pgoff); 1676 } 1677 1678 if (machine__is_default_guest(machine)) { 1679 /* 1680 * preload dso of guest kernel and modules 1681 */ 1682 dso__load(kernel, machine__kernel_map(machine)); 1683 } 1684 dso__put(kernel); 1685 } else if (perf_event__is_extra_kernel_mmap(machine, xm)) { 1686 return machine__process_extra_kernel_map(machine, xm); 1687 } 1688 return 0; 1689 out_problem: 1690 return -1; 1691 } 1692 1693 int machine__process_mmap2_event(struct machine *machine, 1694 union perf_event *event, 1695 struct perf_sample *sample) 1696 { 1697 struct thread *thread; 1698 struct map *map; 1699 struct dso_id dso_id = { 1700 .maj = event->mmap2.maj, 1701 .min = event->mmap2.min, 1702 .ino = event->mmap2.ino, 1703 .ino_generation = event->mmap2.ino_generation, 1704 }; 1705 struct build_id __bid, *bid = NULL; 1706 int ret = 0; 1707 1708 if (dump_trace) 1709 perf_event__fprintf_mmap2(event, stdout); 1710 1711 if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) { 1712 bid = &__bid; 1713 build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size); 1714 } 1715 1716 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1717 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1718 struct extra_kernel_map xm = { 1719 .start = event->mmap2.start, 1720 .end = event->mmap2.start + event->mmap2.len, 1721 .pgoff = event->mmap2.pgoff, 1722 }; 1723 1724 strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN); 1725 ret = machine__process_kernel_mmap_event(machine, &xm, bid); 1726 if (ret < 0) 1727 goto out_problem; 1728 return 0; 1729 } 1730 1731 thread = machine__findnew_thread(machine, event->mmap2.pid, 1732 event->mmap2.tid); 1733 if (thread == NULL) 1734 goto out_problem; 1735 1736 map = map__new(machine, event->mmap2.start, 1737 event->mmap2.len, event->mmap2.pgoff, 1738 &dso_id, event->mmap2.prot, 1739 event->mmap2.flags, bid, 1740 event->mmap2.filename, thread); 1741 1742 if (map == NULL) 1743 goto out_problem_map; 1744 1745 ret = thread__insert_map(thread, map); 1746 if (ret) 1747 goto out_problem_insert; 1748 1749 thread__put(thread); 1750 map__put(map); 1751 return 0; 1752 1753 out_problem_insert: 1754 map__put(map); 1755 out_problem_map: 1756 thread__put(thread); 1757 out_problem: 1758 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); 1759 return 0; 1760 } 1761 1762 int machine__process_mmap_event(struct machine *machine, union perf_event *event, 1763 struct perf_sample *sample) 1764 { 1765 struct thread *thread; 1766 struct map *map; 1767 u32 prot = 0; 1768 int ret = 0; 1769 1770 if (dump_trace) 1771 perf_event__fprintf_mmap(event, stdout); 1772 1773 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1774 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1775 struct extra_kernel_map xm = { 1776 .start = event->mmap.start, 1777 .end = event->mmap.start + event->mmap.len, 1778 .pgoff = event->mmap.pgoff, 1779 }; 1780 1781 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN); 1782 ret = machine__process_kernel_mmap_event(machine, &xm, NULL); 1783 if (ret < 0) 1784 goto out_problem; 1785 return 0; 1786 } 1787 1788 thread = machine__findnew_thread(machine, event->mmap.pid, 1789 event->mmap.tid); 1790 if (thread == NULL) 1791 goto out_problem; 1792 1793 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA)) 1794 prot = PROT_EXEC; 1795 1796 map = map__new(machine, event->mmap.start, 1797 event->mmap.len, event->mmap.pgoff, 1798 NULL, prot, 0, NULL, event->mmap.filename, thread); 1799 1800 if (map == NULL) 1801 goto out_problem_map; 1802 1803 ret = thread__insert_map(thread, map); 1804 if (ret) 1805 goto out_problem_insert; 1806 1807 thread__put(thread); 1808 map__put(map); 1809 return 0; 1810 1811 out_problem_insert: 1812 map__put(map); 1813 out_problem_map: 1814 thread__put(thread); 1815 out_problem: 1816 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); 1817 return 0; 1818 } 1819 1820 void machine__remove_thread(struct machine *machine, struct thread *th) 1821 { 1822 return threads__remove(&machine->threads, th); 1823 } 1824 1825 int machine__process_fork_event(struct machine *machine, union perf_event *event, 1826 struct perf_sample *sample) 1827 { 1828 struct thread *thread = machine__find_thread(machine, 1829 event->fork.pid, 1830 event->fork.tid); 1831 struct thread *parent = machine__findnew_thread(machine, 1832 event->fork.ppid, 1833 event->fork.ptid); 1834 bool do_maps_clone = true; 1835 int err = 0; 1836 1837 if (dump_trace) 1838 perf_event__fprintf_task(event, stdout); 1839 1840 /* 1841 * There may be an existing thread that is not actually the parent, 1842 * either because we are processing events out of order, or because the 1843 * (fork) event that would have removed the thread was lost. Assume the 1844 * latter case and continue on as best we can. 1845 */ 1846 if (thread__pid(parent) != (pid_t)event->fork.ppid) { 1847 dump_printf("removing erroneous parent thread %d/%d\n", 1848 thread__pid(parent), thread__tid(parent)); 1849 machine__remove_thread(machine, parent); 1850 thread__put(parent); 1851 parent = machine__findnew_thread(machine, event->fork.ppid, 1852 event->fork.ptid); 1853 } 1854 1855 /* if a thread currently exists for the thread id remove it */ 1856 if (thread != NULL) { 1857 machine__remove_thread(machine, thread); 1858 thread__put(thread); 1859 } 1860 1861 thread = machine__findnew_thread(machine, event->fork.pid, 1862 event->fork.tid); 1863 /* 1864 * When synthesizing FORK events, we are trying to create thread 1865 * objects for the already running tasks on the machine. 1866 * 1867 * Normally, for a kernel FORK event, we want to clone the parent's 1868 * maps because that is what the kernel just did. 1869 * 1870 * But when synthesizing, this should not be done. If we do, we end up 1871 * with overlapping maps as we process the synthesized MMAP2 events that 1872 * get delivered shortly thereafter. 1873 * 1874 * Use the FORK event misc flags in an internal way to signal this 1875 * situation, so we can elide the map clone when appropriate. 1876 */ 1877 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC) 1878 do_maps_clone = false; 1879 1880 if (thread == NULL || parent == NULL || 1881 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) { 1882 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); 1883 err = -1; 1884 } 1885 thread__put(thread); 1886 thread__put(parent); 1887 1888 return err; 1889 } 1890 1891 int machine__process_exit_event(struct machine *machine, union perf_event *event, 1892 struct perf_sample *sample __maybe_unused) 1893 { 1894 struct thread *thread = machine__find_thread(machine, 1895 event->fork.pid, 1896 event->fork.tid); 1897 1898 if (dump_trace) 1899 perf_event__fprintf_task(event, stdout); 1900 1901 /* There is no context switch out before exit, so we decrement here. */ 1902 machine->parallelism--; 1903 if (thread != NULL) { 1904 if (symbol_conf.keep_exited_threads) 1905 thread__set_exited(thread, /*exited=*/true); 1906 else 1907 machine__remove_thread(machine, thread); 1908 } 1909 thread__put(thread); 1910 return 0; 1911 } 1912 1913 int machine__process_event(struct machine *machine, union perf_event *event, 1914 struct perf_sample *sample) 1915 { 1916 int ret; 1917 1918 switch (event->header.type) { 1919 case PERF_RECORD_COMM: 1920 ret = machine__process_comm_event(machine, event, sample); break; 1921 case PERF_RECORD_MMAP: 1922 ret = machine__process_mmap_event(machine, event, sample); break; 1923 case PERF_RECORD_NAMESPACES: 1924 ret = machine__process_namespaces_event(machine, event, sample); break; 1925 case PERF_RECORD_CGROUP: 1926 ret = machine__process_cgroup_event(machine, event, sample); break; 1927 case PERF_RECORD_MMAP2: 1928 ret = machine__process_mmap2_event(machine, event, sample); break; 1929 case PERF_RECORD_FORK: 1930 ret = machine__process_fork_event(machine, event, sample); break; 1931 case PERF_RECORD_EXIT: 1932 ret = machine__process_exit_event(machine, event, sample); break; 1933 case PERF_RECORD_LOST: 1934 ret = machine__process_lost_event(machine, event, sample); break; 1935 case PERF_RECORD_AUX: 1936 ret = machine__process_aux_event(machine, event); break; 1937 case PERF_RECORD_ITRACE_START: 1938 ret = machine__process_itrace_start_event(machine, event); break; 1939 case PERF_RECORD_LOST_SAMPLES: 1940 ret = machine__process_lost_samples_event(machine, event, sample); break; 1941 case PERF_RECORD_SWITCH: 1942 case PERF_RECORD_SWITCH_CPU_WIDE: 1943 ret = machine__process_switch_event(machine, event); break; 1944 case PERF_RECORD_KSYMBOL: 1945 ret = machine__process_ksymbol(machine, event, sample); break; 1946 case PERF_RECORD_BPF_EVENT: 1947 ret = machine__process_bpf(machine, event, sample); break; 1948 case PERF_RECORD_TEXT_POKE: 1949 ret = machine__process_text_poke(machine, event, sample); break; 1950 case PERF_RECORD_AUX_OUTPUT_HW_ID: 1951 ret = machine__process_aux_output_hw_id_event(machine, event); break; 1952 default: 1953 ret = -1; 1954 break; 1955 } 1956 1957 return ret; 1958 } 1959 1960 static bool symbol__match_regex(struct symbol *sym, regex_t *regex) 1961 { 1962 return regexec(regex, sym->name, 0, NULL, 0) == 0; 1963 } 1964 1965 static void ip__resolve_ams(struct thread *thread, 1966 struct addr_map_symbol *ams, 1967 u64 ip) 1968 { 1969 struct addr_location al; 1970 1971 addr_location__init(&al); 1972 /* 1973 * We cannot use the header.misc hint to determine whether a 1974 * branch stack address is user, kernel, guest, hypervisor. 1975 * Branches may straddle the kernel/user/hypervisor boundaries. 1976 * Thus, we have to try consecutively until we find a match 1977 * or else, the symbol is unknown 1978 */ 1979 thread__find_cpumode_addr_location(thread, ip, &al); 1980 1981 ams->addr = ip; 1982 ams->al_addr = al.addr; 1983 ams->al_level = al.level; 1984 ams->ms.maps = maps__get(al.maps); 1985 ams->ms.sym = al.sym; 1986 ams->ms.map = map__get(al.map); 1987 ams->phys_addr = 0; 1988 ams->data_page_size = 0; 1989 addr_location__exit(&al); 1990 } 1991 1992 static void ip__resolve_data(struct thread *thread, 1993 u8 m, struct addr_map_symbol *ams, 1994 u64 addr, u64 phys_addr, u64 daddr_page_size) 1995 { 1996 struct addr_location al; 1997 1998 addr_location__init(&al); 1999 2000 thread__find_symbol(thread, m, addr, &al); 2001 2002 ams->addr = addr; 2003 ams->al_addr = al.addr; 2004 ams->al_level = al.level; 2005 ams->ms.maps = maps__get(al.maps); 2006 ams->ms.sym = al.sym; 2007 ams->ms.map = map__get(al.map); 2008 ams->phys_addr = phys_addr; 2009 ams->data_page_size = daddr_page_size; 2010 addr_location__exit(&al); 2011 } 2012 2013 struct mem_info *sample__resolve_mem(struct perf_sample *sample, 2014 struct addr_location *al) 2015 { 2016 struct mem_info *mi = mem_info__new(); 2017 2018 if (!mi) 2019 return NULL; 2020 2021 ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip); 2022 ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi), 2023 sample->addr, sample->phys_addr, 2024 sample->data_page_size); 2025 mem_info__data_src(mi)->val = sample->data_src; 2026 2027 return mi; 2028 } 2029 2030 static char *callchain_srcline(struct map_symbol *ms, u64 ip) 2031 { 2032 struct map *map = ms->map; 2033 char *srcline = NULL; 2034 struct dso *dso; 2035 2036 if (!map || callchain_param.key == CCKEY_FUNCTION) 2037 return srcline; 2038 2039 dso = map__dso(map); 2040 srcline = srcline__tree_find(dso__srclines(dso), ip); 2041 if (!srcline) { 2042 bool show_sym = false; 2043 bool show_addr = callchain_param.key == CCKEY_ADDRESS; 2044 2045 srcline = get_srcline(dso, map__rip_2objdump(map, ip), 2046 ms->sym, show_sym, show_addr, ip); 2047 srcline__tree_insert(dso__srclines(dso), ip, srcline); 2048 } 2049 2050 return srcline; 2051 } 2052 2053 struct iterations { 2054 int nr_loop_iter; 2055 u64 cycles; 2056 }; 2057 2058 static int add_callchain_ip(struct thread *thread, 2059 struct callchain_cursor *cursor, 2060 struct symbol **parent, 2061 struct addr_location *root_al, 2062 u8 *cpumode, 2063 u64 ip, 2064 bool branch, 2065 struct branch_flags *flags, 2066 struct iterations *iter, 2067 u64 branch_from, 2068 bool symbols) 2069 { 2070 struct map_symbol ms = {}; 2071 struct addr_location al; 2072 int nr_loop_iter = 0, err = 0; 2073 u64 iter_cycles = 0; 2074 const char *srcline = NULL; 2075 2076 addr_location__init(&al); 2077 al.filtered = 0; 2078 al.sym = NULL; 2079 al.srcline = NULL; 2080 if (!cpumode) { 2081 thread__find_cpumode_addr_location(thread, ip, &al); 2082 } else { 2083 if (ip >= PERF_CONTEXT_MAX) { 2084 switch (ip) { 2085 case PERF_CONTEXT_HV: 2086 *cpumode = PERF_RECORD_MISC_HYPERVISOR; 2087 break; 2088 case PERF_CONTEXT_KERNEL: 2089 *cpumode = PERF_RECORD_MISC_KERNEL; 2090 break; 2091 case PERF_CONTEXT_USER: 2092 *cpumode = PERF_RECORD_MISC_USER; 2093 break; 2094 default: 2095 pr_debug("invalid callchain context: " 2096 "%"PRId64"\n", (s64) ip); 2097 /* 2098 * It seems the callchain is corrupted. 2099 * Discard all. 2100 */ 2101 callchain_cursor_reset(cursor); 2102 err = 1; 2103 goto out; 2104 } 2105 goto out; 2106 } 2107 if (symbols) 2108 thread__find_symbol(thread, *cpumode, ip, &al); 2109 } 2110 2111 if (al.sym != NULL) { 2112 if (perf_hpp_list.parent && !*parent && 2113 symbol__match_regex(al.sym, &parent_regex)) 2114 *parent = al.sym; 2115 else if (have_ignore_callees && root_al && 2116 symbol__match_regex(al.sym, &ignore_callees_regex)) { 2117 /* Treat this symbol as the root, 2118 forgetting its callees. */ 2119 addr_location__copy(root_al, &al); 2120 callchain_cursor_reset(cursor); 2121 } 2122 } 2123 2124 if (symbol_conf.hide_unresolved && al.sym == NULL) 2125 goto out; 2126 2127 if (iter) { 2128 nr_loop_iter = iter->nr_loop_iter; 2129 iter_cycles = iter->cycles; 2130 } 2131 2132 ms.maps = maps__get(al.maps); 2133 ms.map = map__get(al.map); 2134 ms.sym = al.sym; 2135 srcline = callchain_srcline(&ms, al.addr); 2136 err = callchain_cursor_append(cursor, ip, &ms, 2137 branch, flags, nr_loop_iter, 2138 iter_cycles, branch_from, srcline); 2139 out: 2140 addr_location__exit(&al); 2141 map_symbol__exit(&ms); 2142 return err; 2143 } 2144 2145 struct branch_info *sample__resolve_bstack(struct perf_sample *sample, 2146 struct addr_location *al) 2147 { 2148 unsigned int i; 2149 const struct branch_stack *bs = sample->branch_stack; 2150 struct branch_entry *entries = perf_sample__branch_entries(sample); 2151 u64 *branch_stack_cntr = sample->branch_stack_cntr; 2152 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info)); 2153 2154 if (!bi) 2155 return NULL; 2156 2157 for (i = 0; i < bs->nr; i++) { 2158 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to); 2159 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from); 2160 bi[i].flags = entries[i].flags; 2161 if (branch_stack_cntr) 2162 bi[i].branch_stack_cntr = branch_stack_cntr[i]; 2163 } 2164 return bi; 2165 } 2166 2167 static void save_iterations(struct iterations *iter, 2168 struct branch_entry *be, int nr) 2169 { 2170 int i; 2171 2172 iter->nr_loop_iter++; 2173 iter->cycles = 0; 2174 2175 for (i = 0; i < nr; i++) 2176 iter->cycles += be[i].flags.cycles; 2177 } 2178 2179 #define CHASHSZ 127 2180 #define CHASHBITS 7 2181 #define NO_ENTRY 0xff 2182 2183 #define PERF_MAX_BRANCH_DEPTH 127 2184 2185 /* Remove loops. */ 2186 static int remove_loops(struct branch_entry *l, int nr, 2187 struct iterations *iter) 2188 { 2189 int i, j, off; 2190 unsigned char chash[CHASHSZ]; 2191 2192 memset(chash, NO_ENTRY, sizeof(chash)); 2193 2194 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255); 2195 2196 for (i = 0; i < nr; i++) { 2197 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ; 2198 2199 /* no collision handling for now */ 2200 if (chash[h] == NO_ENTRY) { 2201 chash[h] = i; 2202 } else if (l[chash[h]].from == l[i].from) { 2203 bool is_loop = true; 2204 /* check if it is a real loop */ 2205 off = 0; 2206 for (j = chash[h]; j < i && i + off < nr; j++, off++) 2207 if (l[j].from != l[i + off].from) { 2208 is_loop = false; 2209 break; 2210 } 2211 if (is_loop) { 2212 j = nr - (i + off); 2213 if (j > 0) { 2214 save_iterations(iter + i + off, 2215 l + i, off); 2216 2217 memmove(iter + i, iter + i + off, 2218 j * sizeof(*iter)); 2219 2220 memmove(l + i, l + i + off, 2221 j * sizeof(*l)); 2222 } 2223 2224 nr -= off; 2225 } 2226 } 2227 } 2228 return nr; 2229 } 2230 2231 static int lbr_callchain_add_kernel_ip(struct thread *thread, 2232 struct callchain_cursor *cursor, 2233 struct perf_sample *sample, 2234 struct symbol **parent, 2235 struct addr_location *root_al, 2236 u64 branch_from, 2237 bool callee, int end, 2238 bool symbols) 2239 { 2240 struct ip_callchain *chain = sample->callchain; 2241 u8 cpumode = PERF_RECORD_MISC_USER; 2242 int err, i; 2243 2244 if (callee) { 2245 for (i = 0; i < end + 1; i++) { 2246 err = add_callchain_ip(thread, cursor, parent, 2247 root_al, &cpumode, chain->ips[i], 2248 false, NULL, NULL, branch_from, 2249 symbols); 2250 if (err) 2251 return err; 2252 } 2253 return 0; 2254 } 2255 2256 for (i = end; i >= 0; i--) { 2257 err = add_callchain_ip(thread, cursor, parent, 2258 root_al, &cpumode, chain->ips[i], 2259 false, NULL, NULL, branch_from, 2260 symbols); 2261 if (err) 2262 return err; 2263 } 2264 2265 return 0; 2266 } 2267 2268 static void save_lbr_cursor_node(struct thread *thread, 2269 struct callchain_cursor *cursor, 2270 int idx) 2271 { 2272 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2273 2274 if (!lbr_stitch) 2275 return; 2276 2277 if (cursor->pos == cursor->nr) { 2278 lbr_stitch->prev_lbr_cursor[idx].valid = false; 2279 return; 2280 } 2281 2282 if (!cursor->curr) 2283 cursor->curr = cursor->first; 2284 else 2285 cursor->curr = cursor->curr->next; 2286 2287 map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms); 2288 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr, 2289 sizeof(struct callchain_cursor_node)); 2290 lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps); 2291 lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map); 2292 2293 lbr_stitch->prev_lbr_cursor[idx].valid = true; 2294 cursor->pos++; 2295 } 2296 2297 static int lbr_callchain_add_lbr_ip(struct thread *thread, 2298 struct callchain_cursor *cursor, 2299 struct perf_sample *sample, 2300 struct symbol **parent, 2301 struct addr_location *root_al, 2302 u64 *branch_from, 2303 bool callee, 2304 bool symbols) 2305 { 2306 struct branch_stack *lbr_stack = sample->branch_stack; 2307 struct branch_entry *entries = perf_sample__branch_entries(sample); 2308 u8 cpumode = PERF_RECORD_MISC_USER; 2309 int lbr_nr = lbr_stack->nr; 2310 struct branch_flags *flags; 2311 int err, i; 2312 u64 ip; 2313 2314 /* 2315 * The curr and pos are not used in writing session. They are cleared 2316 * in callchain_cursor_commit() when the writing session is closed. 2317 * Using curr and pos to track the current cursor node. 2318 */ 2319 if (thread__lbr_stitch(thread)) { 2320 cursor->curr = NULL; 2321 cursor->pos = cursor->nr; 2322 if (cursor->nr) { 2323 cursor->curr = cursor->first; 2324 for (i = 0; i < (int)(cursor->nr - 1); i++) 2325 cursor->curr = cursor->curr->next; 2326 } 2327 } 2328 2329 if (callee) { 2330 /* Add LBR ip from first entries.to */ 2331 ip = entries[0].to; 2332 flags = &entries[0].flags; 2333 *branch_from = entries[0].from; 2334 err = add_callchain_ip(thread, cursor, parent, 2335 root_al, &cpumode, ip, 2336 true, flags, NULL, 2337 *branch_from, symbols); 2338 if (err) 2339 return err; 2340 2341 /* 2342 * The number of cursor node increases. 2343 * Move the current cursor node. 2344 * But does not need to save current cursor node for entry 0. 2345 * It's impossible to stitch the whole LBRs of previous sample. 2346 */ 2347 if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) { 2348 if (!cursor->curr) 2349 cursor->curr = cursor->first; 2350 else 2351 cursor->curr = cursor->curr->next; 2352 cursor->pos++; 2353 } 2354 2355 /* Add LBR ip from entries.from one by one. */ 2356 for (i = 0; i < lbr_nr; i++) { 2357 ip = entries[i].from; 2358 flags = &entries[i].flags; 2359 err = add_callchain_ip(thread, cursor, parent, 2360 root_al, &cpumode, ip, 2361 true, flags, NULL, 2362 *branch_from, symbols); 2363 if (err) 2364 return err; 2365 save_lbr_cursor_node(thread, cursor, i); 2366 } 2367 return 0; 2368 } 2369 2370 /* Add LBR ip from entries.from one by one. */ 2371 for (i = lbr_nr - 1; i >= 0; i--) { 2372 ip = entries[i].from; 2373 flags = &entries[i].flags; 2374 err = add_callchain_ip(thread, cursor, parent, 2375 root_al, &cpumode, ip, 2376 true, flags, NULL, 2377 *branch_from, symbols); 2378 if (err) 2379 return err; 2380 save_lbr_cursor_node(thread, cursor, i); 2381 } 2382 2383 if (lbr_nr > 0) { 2384 /* Add LBR ip from first entries.to */ 2385 ip = entries[0].to; 2386 flags = &entries[0].flags; 2387 *branch_from = entries[0].from; 2388 err = add_callchain_ip(thread, cursor, parent, 2389 root_al, &cpumode, ip, 2390 true, flags, NULL, 2391 *branch_from, symbols); 2392 if (err) 2393 return err; 2394 } 2395 2396 return 0; 2397 } 2398 2399 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread, 2400 struct callchain_cursor *cursor) 2401 { 2402 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2403 struct callchain_cursor_node *cnode; 2404 struct stitch_list *stitch_node; 2405 int err; 2406 2407 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) { 2408 cnode = &stitch_node->cursor; 2409 2410 err = callchain_cursor_append(cursor, cnode->ip, 2411 &cnode->ms, 2412 cnode->branch, 2413 &cnode->branch_flags, 2414 cnode->nr_loop_iter, 2415 cnode->iter_cycles, 2416 cnode->branch_from, 2417 cnode->srcline); 2418 if (err) 2419 return err; 2420 } 2421 return 0; 2422 } 2423 2424 static struct stitch_list *get_stitch_node(struct thread *thread) 2425 { 2426 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2427 struct stitch_list *stitch_node; 2428 2429 if (!list_empty(&lbr_stitch->free_lists)) { 2430 stitch_node = list_first_entry(&lbr_stitch->free_lists, 2431 struct stitch_list, node); 2432 list_del(&stitch_node->node); 2433 2434 return stitch_node; 2435 } 2436 2437 return malloc(sizeof(struct stitch_list)); 2438 } 2439 2440 static bool has_stitched_lbr(struct thread *thread, 2441 struct perf_sample *cur, 2442 struct perf_sample *prev, 2443 unsigned int max_lbr, 2444 bool callee) 2445 { 2446 struct branch_stack *cur_stack = cur->branch_stack; 2447 struct branch_entry *cur_entries = perf_sample__branch_entries(cur); 2448 struct branch_stack *prev_stack = prev->branch_stack; 2449 struct branch_entry *prev_entries = perf_sample__branch_entries(prev); 2450 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2451 int i, j, nr_identical_branches = 0; 2452 struct stitch_list *stitch_node; 2453 u64 cur_base, distance; 2454 2455 if (!cur_stack || !prev_stack) 2456 return false; 2457 2458 /* Find the physical index of the base-of-stack for current sample. */ 2459 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1; 2460 2461 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) : 2462 (max_lbr + prev_stack->hw_idx - cur_base); 2463 /* Previous sample has shorter stack. Nothing can be stitched. */ 2464 if (distance + 1 > prev_stack->nr) 2465 return false; 2466 2467 /* 2468 * Check if there are identical LBRs between two samples. 2469 * Identical LBRs must have same from, to and flags values. Also, 2470 * they have to be saved in the same LBR registers (same physical 2471 * index). 2472 * 2473 * Starts from the base-of-stack of current sample. 2474 */ 2475 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) { 2476 if ((prev_entries[i].from != cur_entries[j].from) || 2477 (prev_entries[i].to != cur_entries[j].to) || 2478 (prev_entries[i].flags.value != cur_entries[j].flags.value)) 2479 break; 2480 nr_identical_branches++; 2481 } 2482 2483 if (!nr_identical_branches) 2484 return false; 2485 2486 /* 2487 * Save the LBRs between the base-of-stack of previous sample 2488 * and the base-of-stack of current sample into lbr_stitch->lists. 2489 * These LBRs will be stitched later. 2490 */ 2491 for (i = prev_stack->nr - 1; i > (int)distance; i--) { 2492 2493 if (!lbr_stitch->prev_lbr_cursor[i].valid) 2494 continue; 2495 2496 stitch_node = get_stitch_node(thread); 2497 if (!stitch_node) 2498 return false; 2499 2500 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i], 2501 sizeof(struct callchain_cursor_node)); 2502 2503 stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps); 2504 stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map); 2505 2506 if (callee) 2507 list_add(&stitch_node->node, &lbr_stitch->lists); 2508 else 2509 list_add_tail(&stitch_node->node, &lbr_stitch->lists); 2510 } 2511 2512 return true; 2513 } 2514 2515 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr) 2516 { 2517 if (thread__lbr_stitch(thread)) 2518 return true; 2519 2520 thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch))); 2521 if (!thread__lbr_stitch(thread)) 2522 goto err; 2523 2524 thread__lbr_stitch(thread)->prev_lbr_cursor = 2525 calloc(max_lbr + 1, sizeof(struct callchain_cursor_node)); 2526 if (!thread__lbr_stitch(thread)->prev_lbr_cursor) 2527 goto free_lbr_stitch; 2528 2529 thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1; 2530 2531 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists); 2532 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists); 2533 2534 return true; 2535 2536 free_lbr_stitch: 2537 free(thread__lbr_stitch(thread)); 2538 thread__set_lbr_stitch(thread, NULL); 2539 err: 2540 pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n"); 2541 thread__set_lbr_stitch_enable(thread, false); 2542 return false; 2543 } 2544 2545 /* 2546 * Resolve LBR callstack chain sample 2547 * Return: 2548 * 1 on success get LBR callchain information 2549 * 0 no available LBR callchain information, should try fp 2550 * negative error code on other errors. 2551 */ 2552 static int resolve_lbr_callchain_sample(struct thread *thread, 2553 struct callchain_cursor *cursor, 2554 struct perf_sample *sample, 2555 struct symbol **parent, 2556 struct addr_location *root_al, 2557 int max_stack, 2558 unsigned int max_lbr, 2559 bool symbols) 2560 { 2561 bool callee = (callchain_param.order == ORDER_CALLEE); 2562 struct ip_callchain *chain = sample->callchain; 2563 int chain_nr = min(max_stack, (int)chain->nr), i; 2564 struct lbr_stitch *lbr_stitch; 2565 bool stitched_lbr = false; 2566 u64 branch_from = 0; 2567 int err; 2568 2569 for (i = 0; i < chain_nr; i++) { 2570 if (chain->ips[i] == PERF_CONTEXT_USER) 2571 break; 2572 } 2573 2574 /* LBR only affects the user callchain */ 2575 if (i == chain_nr) 2576 return 0; 2577 2578 if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx && 2579 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) { 2580 lbr_stitch = thread__lbr_stitch(thread); 2581 2582 stitched_lbr = has_stitched_lbr(thread, sample, 2583 &lbr_stitch->prev_sample, 2584 max_lbr, callee); 2585 2586 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) { 2587 struct stitch_list *stitch_node; 2588 2589 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) 2590 map_symbol__exit(&stitch_node->cursor.ms); 2591 2592 list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists); 2593 } 2594 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample)); 2595 } 2596 2597 if (callee) { 2598 /* Add kernel ip */ 2599 err = lbr_callchain_add_kernel_ip(thread, cursor, sample, 2600 parent, root_al, branch_from, 2601 true, i, symbols); 2602 if (err) 2603 goto error; 2604 2605 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent, 2606 root_al, &branch_from, true, symbols); 2607 if (err) 2608 goto error; 2609 2610 if (stitched_lbr) { 2611 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor); 2612 if (err) 2613 goto error; 2614 } 2615 2616 } else { 2617 if (stitched_lbr) { 2618 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor); 2619 if (err) 2620 goto error; 2621 } 2622 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent, 2623 root_al, &branch_from, false, symbols); 2624 if (err) 2625 goto error; 2626 2627 /* Add kernel ip */ 2628 err = lbr_callchain_add_kernel_ip(thread, cursor, sample, 2629 parent, root_al, branch_from, 2630 false, i, symbols); 2631 if (err) 2632 goto error; 2633 } 2634 return 1; 2635 2636 error: 2637 return (err < 0) ? err : 0; 2638 } 2639 2640 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread, 2641 struct callchain_cursor *cursor, 2642 struct symbol **parent, 2643 struct addr_location *root_al, 2644 u8 *cpumode, int ent, bool symbols) 2645 { 2646 int err = 0; 2647 2648 while (--ent >= 0) { 2649 u64 ip = chain->ips[ent]; 2650 2651 if (ip >= PERF_CONTEXT_MAX) { 2652 err = add_callchain_ip(thread, cursor, parent, 2653 root_al, cpumode, ip, 2654 false, NULL, NULL, 0, symbols); 2655 break; 2656 } 2657 } 2658 return err; 2659 } 2660 2661 static u64 get_leaf_frame_caller(struct perf_sample *sample, 2662 struct thread *thread, int usr_idx) 2663 { 2664 if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64")) 2665 return get_leaf_frame_caller_aarch64(sample, thread, usr_idx); 2666 else 2667 return 0; 2668 } 2669 2670 static int thread__resolve_callchain_sample(struct thread *thread, 2671 struct callchain_cursor *cursor, 2672 struct evsel *evsel, 2673 struct perf_sample *sample, 2674 struct symbol **parent, 2675 struct addr_location *root_al, 2676 int max_stack, 2677 bool symbols) 2678 { 2679 struct branch_stack *branch = sample->branch_stack; 2680 struct branch_entry *entries = perf_sample__branch_entries(sample); 2681 struct ip_callchain *chain = sample->callchain; 2682 int chain_nr = 0; 2683 u8 cpumode = PERF_RECORD_MISC_USER; 2684 int i, j, err, nr_entries, usr_idx; 2685 int skip_idx = -1; 2686 int first_call = 0; 2687 u64 leaf_frame_caller; 2688 2689 if (chain) 2690 chain_nr = chain->nr; 2691 2692 if (evsel__has_branch_callstack(evsel)) { 2693 struct perf_env *env = evsel__env(evsel); 2694 2695 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent, 2696 root_al, max_stack, 2697 !env ? 0 : env->max_branches, 2698 symbols); 2699 if (err) 2700 return (err < 0) ? err : 0; 2701 } 2702 2703 /* 2704 * Based on DWARF debug information, some architectures skip 2705 * a callchain entry saved by the kernel. 2706 */ 2707 skip_idx = arch_skip_callchain_idx(thread, chain); 2708 2709 /* 2710 * Add branches to call stack for easier browsing. This gives 2711 * more context for a sample than just the callers. 2712 * 2713 * This uses individual histograms of paths compared to the 2714 * aggregated histograms the normal LBR mode uses. 2715 * 2716 * Limitations for now: 2717 * - No extra filters 2718 * - No annotations (should annotate somehow) 2719 */ 2720 2721 if (branch && callchain_param.branch_callstack) { 2722 int nr = min(max_stack, (int)branch->nr); 2723 struct branch_entry be[nr]; 2724 struct iterations iter[nr]; 2725 2726 if (branch->nr > PERF_MAX_BRANCH_DEPTH) { 2727 pr_warning("corrupted branch chain. skipping...\n"); 2728 goto check_calls; 2729 } 2730 2731 for (i = 0; i < nr; i++) { 2732 if (callchain_param.order == ORDER_CALLEE) { 2733 be[i] = entries[i]; 2734 2735 if (chain == NULL) 2736 continue; 2737 2738 /* 2739 * Check for overlap into the callchain. 2740 * The return address is one off compared to 2741 * the branch entry. To adjust for this 2742 * assume the calling instruction is not longer 2743 * than 8 bytes. 2744 */ 2745 if (i == skip_idx || 2746 chain->ips[first_call] >= PERF_CONTEXT_MAX) 2747 first_call++; 2748 else if (be[i].from < chain->ips[first_call] && 2749 be[i].from >= chain->ips[first_call] - 8) 2750 first_call++; 2751 } else 2752 be[i] = entries[branch->nr - i - 1]; 2753 } 2754 2755 memset(iter, 0, sizeof(struct iterations) * nr); 2756 nr = remove_loops(be, nr, iter); 2757 2758 for (i = 0; i < nr; i++) { 2759 err = add_callchain_ip(thread, cursor, parent, 2760 root_al, 2761 NULL, be[i].to, 2762 true, &be[i].flags, 2763 NULL, be[i].from, symbols); 2764 2765 if (!err) { 2766 err = add_callchain_ip(thread, cursor, parent, root_al, 2767 NULL, be[i].from, 2768 true, &be[i].flags, 2769 &iter[i], 0, symbols); 2770 } 2771 if (err == -EINVAL) 2772 break; 2773 if (err) 2774 return err; 2775 } 2776 2777 if (chain_nr == 0) 2778 return 0; 2779 2780 chain_nr -= nr; 2781 } 2782 2783 check_calls: 2784 if (chain && callchain_param.order != ORDER_CALLEE) { 2785 err = find_prev_cpumode(chain, thread, cursor, parent, root_al, 2786 &cpumode, chain->nr - first_call, symbols); 2787 if (err) 2788 return (err < 0) ? err : 0; 2789 } 2790 for (i = first_call, nr_entries = 0; 2791 i < chain_nr && nr_entries < max_stack; i++) { 2792 u64 ip; 2793 2794 if (callchain_param.order == ORDER_CALLEE) 2795 j = i; 2796 else 2797 j = chain->nr - i - 1; 2798 2799 #ifdef HAVE_SKIP_CALLCHAIN_IDX 2800 if (j == skip_idx) 2801 continue; 2802 #endif 2803 ip = chain->ips[j]; 2804 if (ip < PERF_CONTEXT_MAX) 2805 ++nr_entries; 2806 else if (callchain_param.order != ORDER_CALLEE) { 2807 err = find_prev_cpumode(chain, thread, cursor, parent, 2808 root_al, &cpumode, j, symbols); 2809 if (err) 2810 return (err < 0) ? err : 0; 2811 continue; 2812 } 2813 2814 /* 2815 * PERF_CONTEXT_USER allows us to locate where the user stack ends. 2816 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER, 2817 * the index will be different in order to add the missing frame 2818 * at the right place. 2819 */ 2820 2821 usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1; 2822 2823 if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) { 2824 2825 leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx); 2826 2827 /* 2828 * check if leaf_frame_Caller != ip to not add the same 2829 * value twice. 2830 */ 2831 2832 if (leaf_frame_caller && leaf_frame_caller != ip) { 2833 2834 err = add_callchain_ip(thread, cursor, parent, 2835 root_al, &cpumode, leaf_frame_caller, 2836 false, NULL, NULL, 0, symbols); 2837 if (err) 2838 return (err < 0) ? err : 0; 2839 } 2840 } 2841 2842 err = add_callchain_ip(thread, cursor, parent, 2843 root_al, &cpumode, ip, 2844 false, NULL, NULL, 0, symbols); 2845 2846 if (err) 2847 return (err < 0) ? err : 0; 2848 } 2849 2850 return 0; 2851 } 2852 2853 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip) 2854 { 2855 struct symbol *sym = ms->sym; 2856 struct map *map = ms->map; 2857 struct inline_node *inline_node; 2858 struct inline_list *ilist; 2859 struct dso *dso; 2860 u64 addr; 2861 int ret = 1; 2862 struct map_symbol ilist_ms; 2863 2864 if (!symbol_conf.inline_name || !map || !sym) 2865 return ret; 2866 2867 addr = map__dso_map_ip(map, ip); 2868 addr = map__rip_2objdump(map, addr); 2869 dso = map__dso(map); 2870 2871 inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr); 2872 if (!inline_node) { 2873 inline_node = dso__parse_addr_inlines(dso, addr, sym); 2874 if (!inline_node) 2875 return ret; 2876 inlines__tree_insert(dso__inlined_nodes(dso), inline_node); 2877 } 2878 2879 ilist_ms = (struct map_symbol) { 2880 .maps = maps__get(ms->maps), 2881 .map = map__get(map), 2882 }; 2883 list_for_each_entry(ilist, &inline_node->val, list) { 2884 ilist_ms.sym = ilist->symbol; 2885 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false, 2886 NULL, 0, 0, 0, ilist->srcline); 2887 2888 if (ret != 0) 2889 return ret; 2890 } 2891 map_symbol__exit(&ilist_ms); 2892 2893 return ret; 2894 } 2895 2896 static int unwind_entry(struct unwind_entry *entry, void *arg) 2897 { 2898 struct callchain_cursor *cursor = arg; 2899 const char *srcline = NULL; 2900 u64 addr = entry->ip; 2901 2902 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL) 2903 return 0; 2904 2905 if (append_inlines(cursor, &entry->ms, entry->ip) == 0) 2906 return 0; 2907 2908 /* 2909 * Convert entry->ip from a virtual address to an offset in 2910 * its corresponding binary. 2911 */ 2912 if (entry->ms.map) 2913 addr = map__dso_map_ip(entry->ms.map, entry->ip); 2914 2915 srcline = callchain_srcline(&entry->ms, addr); 2916 return callchain_cursor_append(cursor, entry->ip, &entry->ms, 2917 false, NULL, 0, 0, 0, srcline); 2918 } 2919 2920 static int thread__resolve_callchain_unwind(struct thread *thread, 2921 struct callchain_cursor *cursor, 2922 struct evsel *evsel, 2923 struct perf_sample *sample, 2924 int max_stack, bool symbols) 2925 { 2926 /* Can we do dwarf post unwind? */ 2927 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) && 2928 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER))) 2929 return 0; 2930 2931 /* Bail out if nothing was captured. */ 2932 if (!sample->user_regs || !sample->user_regs->regs || 2933 !sample->user_stack.size) 2934 return 0; 2935 2936 if (!symbols) 2937 pr_debug("Not resolving symbols with an unwinder isn't currently supported\n"); 2938 2939 return unwind__get_entries(unwind_entry, cursor, 2940 thread, sample, max_stack, false); 2941 } 2942 2943 int __thread__resolve_callchain(struct thread *thread, 2944 struct callchain_cursor *cursor, 2945 struct evsel *evsel, 2946 struct perf_sample *sample, 2947 struct symbol **parent, 2948 struct addr_location *root_al, 2949 int max_stack, 2950 bool symbols) 2951 { 2952 int ret = 0; 2953 2954 if (cursor == NULL) 2955 return -ENOMEM; 2956 2957 callchain_cursor_reset(cursor); 2958 2959 if (callchain_param.order == ORDER_CALLEE) { 2960 ret = thread__resolve_callchain_sample(thread, cursor, 2961 evsel, sample, 2962 parent, root_al, 2963 max_stack, symbols); 2964 if (ret) 2965 return ret; 2966 ret = thread__resolve_callchain_unwind(thread, cursor, 2967 evsel, sample, 2968 max_stack, symbols); 2969 } else { 2970 ret = thread__resolve_callchain_unwind(thread, cursor, 2971 evsel, sample, 2972 max_stack, symbols); 2973 if (ret) 2974 return ret; 2975 ret = thread__resolve_callchain_sample(thread, cursor, 2976 evsel, sample, 2977 parent, root_al, 2978 max_stack, symbols); 2979 } 2980 2981 return ret; 2982 } 2983 2984 int machine__for_each_thread(struct machine *machine, 2985 int (*fn)(struct thread *thread, void *p), 2986 void *priv) 2987 { 2988 return threads__for_each_thread(&machine->threads, fn, priv); 2989 } 2990 2991 int machines__for_each_thread(struct machines *machines, 2992 int (*fn)(struct thread *thread, void *p), 2993 void *priv) 2994 { 2995 struct rb_node *nd; 2996 int rc = 0; 2997 2998 rc = machine__for_each_thread(&machines->host, fn, priv); 2999 if (rc != 0) 3000 return rc; 3001 3002 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 3003 struct machine *machine = rb_entry(nd, struct machine, rb_node); 3004 3005 rc = machine__for_each_thread(machine, fn, priv); 3006 if (rc != 0) 3007 return rc; 3008 } 3009 return rc; 3010 } 3011 3012 3013 static int thread_list_cb(struct thread *thread, void *data) 3014 { 3015 struct list_head *list = data; 3016 struct thread_list *entry = malloc(sizeof(*entry)); 3017 3018 if (!entry) 3019 return -ENOMEM; 3020 3021 entry->thread = thread__get(thread); 3022 list_add_tail(&entry->list, list); 3023 return 0; 3024 } 3025 3026 int machine__thread_list(struct machine *machine, struct list_head *list) 3027 { 3028 return machine__for_each_thread(machine, thread_list_cb, list); 3029 } 3030 3031 void thread_list__delete(struct list_head *list) 3032 { 3033 struct thread_list *pos, *next; 3034 3035 list_for_each_entry_safe(pos, next, list, list) { 3036 thread__zput(pos->thread); 3037 list_del(&pos->list); 3038 free(pos); 3039 } 3040 } 3041 3042 pid_t machine__get_current_tid(struct machine *machine, int cpu) 3043 { 3044 if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz) 3045 return -1; 3046 3047 return machine->current_tid[cpu]; 3048 } 3049 3050 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, 3051 pid_t tid) 3052 { 3053 struct thread *thread; 3054 const pid_t init_val = -1; 3055 3056 if (cpu < 0) 3057 return -EINVAL; 3058 3059 if (realloc_array_as_needed(machine->current_tid, 3060 machine->current_tid_sz, 3061 (unsigned int)cpu, 3062 &init_val)) 3063 return -ENOMEM; 3064 3065 machine->current_tid[cpu] = tid; 3066 3067 thread = machine__findnew_thread(machine, pid, tid); 3068 if (!thread) 3069 return -ENOMEM; 3070 3071 thread__set_cpu(thread, cpu); 3072 thread__put(thread); 3073 3074 return 0; 3075 } 3076 3077 /* 3078 * Compares the raw arch string. N.B. see instead perf_env__arch() or 3079 * machine__normalized_is() if a normalized arch is needed. 3080 */ 3081 bool machine__is(struct machine *machine, const char *arch) 3082 { 3083 return machine && !strcmp(perf_env__raw_arch(machine->env), arch); 3084 } 3085 3086 bool machine__normalized_is(struct machine *machine, const char *arch) 3087 { 3088 return machine && !strcmp(perf_env__arch(machine->env), arch); 3089 } 3090 3091 int machine__nr_cpus_avail(struct machine *machine) 3092 { 3093 return machine ? perf_env__nr_cpus_avail(machine->env) : 0; 3094 } 3095 3096 int machine__get_kernel_start(struct machine *machine) 3097 { 3098 struct map *map = machine__kernel_map(machine); 3099 int err = 0; 3100 3101 /* 3102 * The only addresses above 2^63 are kernel addresses of a 64-bit 3103 * kernel. Note that addresses are unsigned so that on a 32-bit system 3104 * all addresses including kernel addresses are less than 2^32. In 3105 * that case (32-bit system), if the kernel mapping is unknown, all 3106 * addresses will be assumed to be in user space - see 3107 * machine__kernel_ip(). 3108 */ 3109 machine->kernel_start = 1ULL << 63; 3110 if (map) { 3111 err = map__load(map); 3112 /* 3113 * On x86_64, PTI entry trampolines are less than the 3114 * start of kernel text, but still above 2^63. So leave 3115 * kernel_start = 1ULL << 63 for x86_64. 3116 */ 3117 if (!err && !machine__is(machine, "x86_64")) 3118 machine->kernel_start = map__start(map); 3119 } 3120 return err; 3121 } 3122 3123 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr) 3124 { 3125 u8 addr_cpumode = cpumode; 3126 bool kernel_ip; 3127 3128 if (!machine->single_address_space) 3129 goto out; 3130 3131 kernel_ip = machine__kernel_ip(machine, addr); 3132 switch (cpumode) { 3133 case PERF_RECORD_MISC_KERNEL: 3134 case PERF_RECORD_MISC_USER: 3135 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL : 3136 PERF_RECORD_MISC_USER; 3137 break; 3138 case PERF_RECORD_MISC_GUEST_KERNEL: 3139 case PERF_RECORD_MISC_GUEST_USER: 3140 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL : 3141 PERF_RECORD_MISC_GUEST_USER; 3142 break; 3143 default: 3144 break; 3145 } 3146 out: 3147 return addr_cpumode; 3148 } 3149 3150 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, 3151 const struct dso_id *id) 3152 { 3153 return dsos__findnew_id(&machine->dsos, filename, id); 3154 } 3155 3156 struct dso *machine__findnew_dso(struct machine *machine, const char *filename) 3157 { 3158 return machine__findnew_dso_id(machine, filename, NULL); 3159 } 3160 3161 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 3162 { 3163 struct machine *machine = vmachine; 3164 struct map *map; 3165 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map); 3166 3167 if (sym == NULL) 3168 return NULL; 3169 3170 *modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL; 3171 *addrp = map__unmap_ip(map, sym->start); 3172 return sym->name; 3173 } 3174 3175 struct machine__for_each_dso_cb_args { 3176 struct machine *machine; 3177 machine__dso_t fn; 3178 void *priv; 3179 }; 3180 3181 static int machine__for_each_dso_cb(struct dso *dso, void *data) 3182 { 3183 struct machine__for_each_dso_cb_args *args = data; 3184 3185 return args->fn(dso, args->machine, args->priv); 3186 } 3187 3188 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv) 3189 { 3190 struct machine__for_each_dso_cb_args args = { 3191 .machine = machine, 3192 .fn = fn, 3193 .priv = priv, 3194 }; 3195 3196 return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args); 3197 } 3198 3199 int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv) 3200 { 3201 struct maps *maps = machine__kernel_maps(machine); 3202 3203 return maps__for_each_map(maps, fn, priv); 3204 } 3205 3206 bool machine__is_lock_function(struct machine *machine, u64 addr) 3207 { 3208 if (!machine->sched.text_start) { 3209 struct map *kmap; 3210 struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap); 3211 3212 if (!sym) { 3213 /* to avoid retry */ 3214 machine->sched.text_start = 1; 3215 return false; 3216 } 3217 3218 machine->sched.text_start = map__unmap_ip(kmap, sym->start); 3219 3220 /* should not fail from here */ 3221 sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap); 3222 machine->sched.text_end = map__unmap_ip(kmap, sym->start); 3223 3224 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap); 3225 machine->lock.text_start = map__unmap_ip(kmap, sym->start); 3226 3227 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap); 3228 machine->lock.text_end = map__unmap_ip(kmap, sym->start); 3229 3230 sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap); 3231 if (sym) { 3232 machine->traceiter.text_start = map__unmap_ip(kmap, sym->start); 3233 machine->traceiter.text_end = map__unmap_ip(kmap, sym->end); 3234 } 3235 sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap); 3236 if (sym) { 3237 machine->trace.text_start = map__unmap_ip(kmap, sym->start); 3238 machine->trace.text_end = map__unmap_ip(kmap, sym->end); 3239 } 3240 } 3241 3242 /* failed to get kernel symbols */ 3243 if (machine->sched.text_start == 1) 3244 return false; 3245 3246 /* mutex and rwsem functions are in sched text section */ 3247 if (machine->sched.text_start <= addr && addr < machine->sched.text_end) 3248 return true; 3249 3250 /* spinlock functions are in lock text section */ 3251 if (machine->lock.text_start <= addr && addr < machine->lock.text_end) 3252 return true; 3253 3254 /* traceiter functions currently don't have their own section 3255 * but we consider them lock functions 3256 */ 3257 if (machine->traceiter.text_start != 0) { 3258 if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end) 3259 return true; 3260 } 3261 3262 if (machine->trace.text_start != 0) { 3263 if (machine->trace.text_start <= addr && addr < machine->trace.text_end) 3264 return true; 3265 } 3266 3267 return false; 3268 } 3269 3270 int machine__hit_all_dsos(struct machine *machine) 3271 { 3272 return dsos__hit_all(&machine->dsos); 3273 } 3274