1 // SPDX-License-Identifier: GPL-2.0 2 #include <dirent.h> 3 #include <errno.h> 4 #include <inttypes.h> 5 #include <regex.h> 6 #include <stdlib.h> 7 #include "callchain.h" 8 #include "debug.h" 9 #include "dso.h" 10 #include "env.h" 11 #include "event.h" 12 #include "evsel.h" 13 #include "hist.h" 14 #include "machine.h" 15 #include "map.h" 16 #include "map_symbol.h" 17 #include "branch.h" 18 #include "mem-events.h" 19 #include "mem-info.h" 20 #include "path.h" 21 #include "srcline.h" 22 #include "symbol.h" 23 #include "sort.h" 24 #include "strlist.h" 25 #include "target.h" 26 #include "thread.h" 27 #include "util.h" 28 #include "vdso.h" 29 #include <stdbool.h> 30 #include <sys/types.h> 31 #include <sys/stat.h> 32 #include <unistd.h> 33 #include "unwind.h" 34 #include "linux/hash.h" 35 #include "asm/bug.h" 36 #include "bpf-event.h" 37 #include <internal/lib.h> // page_size 38 #include "cgroup.h" 39 #include "arm64-frame-pointer-unwind-support.h" 40 41 #include <linux/ctype.h> 42 #include <symbol/kallsyms.h> 43 #include <linux/mman.h> 44 #include <linux/string.h> 45 #include <linux/zalloc.h> 46 47 static struct dso *machine__kernel_dso(struct machine *machine) 48 { 49 return map__dso(machine->vmlinux_map); 50 } 51 52 static int machine__set_mmap_name(struct machine *machine) 53 { 54 if (machine__is_host(machine)) 55 machine->mmap_name = strdup("[kernel.kallsyms]"); 56 else if (machine__is_default_guest(machine)) 57 machine->mmap_name = strdup("[guest.kernel.kallsyms]"); 58 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]", 59 machine->pid) < 0) 60 machine->mmap_name = NULL; 61 62 return machine->mmap_name ? 0 : -ENOMEM; 63 } 64 65 static void thread__set_guest_comm(struct thread *thread, pid_t pid) 66 { 67 char comm[64]; 68 69 snprintf(comm, sizeof(comm), "[guest/%d]", pid); 70 thread__set_comm(thread, comm, 0); 71 } 72 73 int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 74 { 75 int err = -ENOMEM; 76 77 memset(machine, 0, sizeof(*machine)); 78 machine->kmaps = maps__new(machine); 79 if (machine->kmaps == NULL) 80 return -ENOMEM; 81 82 RB_CLEAR_NODE(&machine->rb_node); 83 dsos__init(&machine->dsos); 84 85 threads__init(&machine->threads); 86 87 machine->vdso_info = NULL; 88 machine->env = NULL; 89 90 machine->pid = pid; 91 92 machine->id_hdr_size = 0; 93 machine->kptr_restrict_warned = false; 94 machine->comm_exec = false; 95 machine->kernel_start = 0; 96 machine->vmlinux_map = NULL; 97 98 machine->root_dir = strdup(root_dir); 99 if (machine->root_dir == NULL) 100 goto out; 101 102 if (machine__set_mmap_name(machine)) 103 goto out; 104 105 if (pid != HOST_KERNEL_ID) { 106 struct thread *thread = machine__findnew_thread(machine, -1, 107 pid); 108 109 if (thread == NULL) 110 goto out; 111 112 thread__set_guest_comm(thread, pid); 113 thread__put(thread); 114 } 115 116 machine->current_tid = NULL; 117 err = 0; 118 119 out: 120 if (err) { 121 zfree(&machine->kmaps); 122 zfree(&machine->root_dir); 123 zfree(&machine->mmap_name); 124 } 125 return 0; 126 } 127 128 struct machine *machine__new_host(void) 129 { 130 struct machine *machine = malloc(sizeof(*machine)); 131 132 if (machine != NULL) { 133 machine__init(machine, "", HOST_KERNEL_ID); 134 135 if (machine__create_kernel_maps(machine) < 0) 136 goto out_delete; 137 138 machine->env = &perf_env; 139 } 140 141 return machine; 142 out_delete: 143 free(machine); 144 return NULL; 145 } 146 147 struct machine *machine__new_kallsyms(void) 148 { 149 struct machine *machine = machine__new_host(); 150 /* 151 * 
FIXME: 152 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly 153 * ask for not using the kcore parsing code, once this one is fixed 154 * to create a map per module. 155 */ 156 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) { 157 machine__delete(machine); 158 machine = NULL; 159 } 160 161 return machine; 162 } 163 164 void machine__delete_threads(struct machine *machine) 165 { 166 threads__remove_all_threads(&machine->threads); 167 } 168 169 void machine__exit(struct machine *machine) 170 { 171 if (machine == NULL) 172 return; 173 174 machine__destroy_kernel_maps(machine); 175 maps__zput(machine->kmaps); 176 dsos__exit(&machine->dsos); 177 machine__exit_vdso(machine); 178 zfree(&machine->root_dir); 179 zfree(&machine->mmap_name); 180 zfree(&machine->current_tid); 181 zfree(&machine->kallsyms_filename); 182 183 threads__exit(&machine->threads); 184 } 185 186 void machine__delete(struct machine *machine) 187 { 188 if (machine) { 189 machine__exit(machine); 190 free(machine); 191 } 192 } 193 194 void machines__init(struct machines *machines) 195 { 196 machine__init(&machines->host, "", HOST_KERNEL_ID); 197 machines->guests = RB_ROOT_CACHED; 198 } 199 200 void machines__exit(struct machines *machines) 201 { 202 machine__exit(&machines->host); 203 /* XXX exit guest */ 204 } 205 206 struct machine *machines__add(struct machines *machines, pid_t pid, 207 const char *root_dir) 208 { 209 struct rb_node **p = &machines->guests.rb_root.rb_node; 210 struct rb_node *parent = NULL; 211 struct machine *pos, *machine = malloc(sizeof(*machine)); 212 bool leftmost = true; 213 214 if (machine == NULL) 215 return NULL; 216 217 if (machine__init(machine, root_dir, pid) != 0) { 218 free(machine); 219 return NULL; 220 } 221 222 while (*p != NULL) { 223 parent = *p; 224 pos = rb_entry(parent, struct machine, rb_node); 225 if (pid < pos->pid) 226 p = &(*p)->rb_left; 227 else { 228 p = &(*p)->rb_right; 229 leftmost = false; 230 } 231 } 232 233 rb_link_node(&machine->rb_node, parent, p); 234 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost); 235 236 machine->machines = machines; 237 238 return machine; 239 } 240 241 void machines__set_comm_exec(struct machines *machines, bool comm_exec) 242 { 243 struct rb_node *nd; 244 245 machines->host.comm_exec = comm_exec; 246 247 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 248 struct machine *machine = rb_entry(nd, struct machine, rb_node); 249 250 machine->comm_exec = comm_exec; 251 } 252 } 253 254 struct machine *machines__find(struct machines *machines, pid_t pid) 255 { 256 struct rb_node **p = &machines->guests.rb_root.rb_node; 257 struct rb_node *parent = NULL; 258 struct machine *machine; 259 struct machine *default_machine = NULL; 260 261 if (pid == HOST_KERNEL_ID) 262 return &machines->host; 263 264 while (*p != NULL) { 265 parent = *p; 266 machine = rb_entry(parent, struct machine, rb_node); 267 if (pid < machine->pid) 268 p = &(*p)->rb_left; 269 else if (pid > machine->pid) 270 p = &(*p)->rb_right; 271 else 272 return machine; 273 if (!machine->pid) 274 default_machine = machine; 275 } 276 277 return default_machine; 278 } 279 280 struct machine *machines__findnew(struct machines *machines, pid_t pid) 281 { 282 char path[PATH_MAX]; 283 const char *root_dir = ""; 284 struct machine *machine = machines__find(machines, pid); 285 286 if (machine && (machine->pid == pid)) 287 goto out; 288 289 if ((pid != HOST_KERNEL_ID) && 290 (pid != DEFAULT_GUEST_KERNEL_ID) && 291 
(symbol_conf.guestmount)) { 292 sprintf(path, "%s/%d", symbol_conf.guestmount, pid); 293 if (access(path, R_OK)) { 294 static struct strlist *seen; 295 296 if (!seen) 297 seen = strlist__new(NULL, NULL); 298 299 if (!strlist__has_entry(seen, path)) { 300 pr_err("Can't access file %s\n", path); 301 strlist__add(seen, path); 302 } 303 machine = NULL; 304 goto out; 305 } 306 root_dir = path; 307 } 308 309 machine = machines__add(machines, pid, root_dir); 310 out: 311 return machine; 312 } 313 314 struct machine *machines__find_guest(struct machines *machines, pid_t pid) 315 { 316 struct machine *machine = machines__find(machines, pid); 317 318 if (!machine) 319 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); 320 return machine; 321 } 322 323 /* 324 * A common case for KVM test programs is that the test program acts as the 325 * hypervisor, creating, running and destroying the virtual machine, and 326 * providing the guest object code from its own object code. In this case, 327 * the VM is not running an OS, but only the functions loaded into it by the 328 * hypervisor test program, and conveniently, loaded at the same virtual 329 * addresses. 330 * 331 * Normally to resolve addresses, MMAP events are needed to map addresses 332 * back to the object code and debug symbols for that object code. 333 * 334 * Currently, there is no way to get such mapping information from guests 335 * but, in the scenario described above, the guest has the same mappings 336 * as the hypervisor, so support for that scenario can be achieved. 337 * 338 * To support that, copy the host thread's maps to the guest thread's maps. 339 * Note, we do not discover the guest until we encounter a guest event, 340 * which works well because it is not until then that we know that the host 341 * thread's maps have been set up. 342 * 343 * This function returns the guest thread. Apart from keeping the data 344 * structures sane, using a thread belonging to the guest machine, instead 345 * of the host thread, allows it to have its own comm (refer 346 * thread__set_guest_comm()). 347 */ 348 static struct thread *findnew_guest_code(struct machine *machine, 349 struct machine *host_machine, 350 pid_t pid) 351 { 352 struct thread *host_thread; 353 struct thread *thread; 354 int err; 355 356 if (!machine) 357 return NULL; 358 359 thread = machine__findnew_thread(machine, -1, pid); 360 if (!thread) 361 return NULL; 362 363 /* Assume maps are set up if there are any */ 364 if (!maps__empty(thread__maps(thread))) 365 return thread; 366 367 host_thread = machine__find_thread(host_machine, -1, pid); 368 if (!host_thread) 369 goto out_err; 370 371 thread__set_guest_comm(thread, pid); 372 373 /* 374 * Guest code can be found in hypervisor process at the same address 375 * so copy host maps. 
376 */ 377 err = maps__copy_from(thread__maps(thread), thread__maps(host_thread)); 378 thread__put(host_thread); 379 if (err) 380 goto out_err; 381 382 return thread; 383 384 out_err: 385 thread__zput(thread); 386 return NULL; 387 } 388 389 struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid) 390 { 391 struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID); 392 struct machine *machine = machines__findnew(machines, pid); 393 394 return findnew_guest_code(machine, host_machine, pid); 395 } 396 397 struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid) 398 { 399 struct machines *machines = machine->machines; 400 struct machine *host_machine; 401 402 if (!machines) 403 return NULL; 404 405 host_machine = machines__find(machines, HOST_KERNEL_ID); 406 407 return findnew_guest_code(machine, host_machine, pid); 408 } 409 410 void machines__process_guests(struct machines *machines, 411 machine__process_t process, void *data) 412 { 413 struct rb_node *nd; 414 415 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 416 struct machine *pos = rb_entry(nd, struct machine, rb_node); 417 process(pos, data); 418 } 419 } 420 421 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) 422 { 423 struct rb_node *node; 424 struct machine *machine; 425 426 machines->host.id_hdr_size = id_hdr_size; 427 428 for (node = rb_first_cached(&machines->guests); node; 429 node = rb_next(node)) { 430 machine = rb_entry(node, struct machine, rb_node); 431 machine->id_hdr_size = id_hdr_size; 432 } 433 434 return; 435 } 436 437 static void machine__update_thread_pid(struct machine *machine, 438 struct thread *th, pid_t pid) 439 { 440 struct thread *leader; 441 442 if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1) 443 return; 444 445 thread__set_pid(th, pid); 446 447 if (thread__pid(th) == thread__tid(th)) 448 return; 449 450 leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); 451 if (!leader) 452 goto out_err; 453 454 if (!thread__maps(leader)) 455 thread__set_maps(leader, maps__new(machine)); 456 457 if (!thread__maps(leader)) 458 goto out_err; 459 460 if (thread__maps(th) == thread__maps(leader)) 461 goto out_put; 462 463 if (thread__maps(th)) { 464 /* 465 * Maps are created from MMAP events which provide the pid and 466 * tid. Consequently there never should be any maps on a thread 467 * with an unknown pid. Just print an error if there are. 468 */ 469 if (!maps__empty(thread__maps(th))) 470 pr_err("Discarding thread maps for %d:%d\n", 471 thread__pid(th), thread__tid(th)); 472 maps__put(thread__maps(th)); 473 } 474 475 thread__set_maps(th, maps__get(thread__maps(leader))); 476 out_put: 477 thread__put(leader); 478 return; 479 out_err: 480 pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th)); 481 goto out_put; 482 } 483 484 /* 485 * Caller must eventually drop thread->refcnt returned with a successful 486 * lookup/new thread inserted. 487 */ 488 static struct thread *__machine__findnew_thread(struct machine *machine, 489 pid_t pid, 490 pid_t tid, 491 bool create) 492 { 493 struct thread *th = threads__find(&machine->threads, tid); 494 bool created; 495 496 if (th) { 497 machine__update_thread_pid(machine, th, pid); 498 return th; 499 } 500 if (!create) 501 return NULL; 502 503 th = threads__findnew(&machine->threads, pid, tid, &created); 504 if (created) { 505 /* 506 * We have to initialize maps separately after rb tree is 507 * updated. 
508 * 509 * The reason is that we call machine__findnew_thread within 510 * thread__init_maps to find the thread leader and that would 511 * screwed the rb tree. 512 */ 513 if (thread__init_maps(th, machine)) { 514 pr_err("Thread init failed thread %d\n", pid); 515 threads__remove(&machine->threads, th); 516 thread__put(th); 517 return NULL; 518 } 519 } else 520 machine__update_thread_pid(machine, th, pid); 521 522 return th; 523 } 524 525 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) 526 { 527 return __machine__findnew_thread(machine, pid, tid, /*create=*/true); 528 } 529 530 struct thread *machine__find_thread(struct machine *machine, pid_t pid, 531 pid_t tid) 532 { 533 return __machine__findnew_thread(machine, pid, tid, /*create=*/false); 534 } 535 536 /* 537 * Threads are identified by pid and tid, and the idle task has pid == tid == 0. 538 * So here a single thread is created for that, but actually there is a separate 539 * idle task per cpu, so there should be one 'struct thread' per cpu, but there 540 * is only 1. That causes problems for some tools, requiring workarounds. For 541 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu(). 542 */ 543 struct thread *machine__idle_thread(struct machine *machine) 544 { 545 struct thread *thread = machine__findnew_thread(machine, 0, 0); 546 547 if (!thread || thread__set_comm(thread, "swapper", 0) || 548 thread__set_namespaces(thread, 0, NULL)) 549 pr_err("problem inserting idle task for machine pid %d\n", machine->pid); 550 551 return thread; 552 } 553 554 struct comm *machine__thread_exec_comm(struct machine *machine, 555 struct thread *thread) 556 { 557 if (machine->comm_exec) 558 return thread__exec_comm(thread); 559 else 560 return thread__comm(thread); 561 } 562 563 int machine__process_comm_event(struct machine *machine, union perf_event *event, 564 struct perf_sample *sample) 565 { 566 struct thread *thread = machine__findnew_thread(machine, 567 event->comm.pid, 568 event->comm.tid); 569 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; 570 int err = 0; 571 572 if (exec) 573 machine->comm_exec = true; 574 575 if (dump_trace) 576 perf_event__fprintf_comm(event, stdout); 577 578 if (thread == NULL || 579 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { 580 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 581 err = -1; 582 } 583 584 thread__put(thread); 585 586 return err; 587 } 588 589 int machine__process_namespaces_event(struct machine *machine __maybe_unused, 590 union perf_event *event, 591 struct perf_sample *sample __maybe_unused) 592 { 593 struct thread *thread = machine__findnew_thread(machine, 594 event->namespaces.pid, 595 event->namespaces.tid); 596 int err = 0; 597 598 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES, 599 "\nWARNING: kernel seems to support more namespaces than perf" 600 " tool.\nTry updating the perf tool..\n\n"); 601 602 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES, 603 "\nWARNING: perf tool seems to support more namespaces than" 604 " the kernel.\nTry updating the kernel..\n\n"); 605 606 if (dump_trace) 607 perf_event__fprintf_namespaces(event, stdout); 608 609 if (thread == NULL || 610 thread__set_namespaces(thread, sample->time, &event->namespaces)) { 611 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n"); 612 err = -1; 613 } 614 615 thread__put(thread); 616 617 return err; 618 } 619 620 int machine__process_cgroup_event(struct machine *machine, 
621 union perf_event *event, 622 struct perf_sample *sample __maybe_unused) 623 { 624 struct cgroup *cgrp; 625 626 if (dump_trace) 627 perf_event__fprintf_cgroup(event, stdout); 628 629 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path); 630 if (cgrp == NULL) 631 return -ENOMEM; 632 633 return 0; 634 } 635 636 int machine__process_lost_event(struct machine *machine __maybe_unused, 637 union perf_event *event, struct perf_sample *sample __maybe_unused) 638 { 639 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n", 640 event->lost.id, event->lost.lost); 641 return 0; 642 } 643 644 int machine__process_lost_samples_event(struct machine *machine __maybe_unused, 645 union perf_event *event, struct perf_sample *sample) 646 { 647 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "%s\n", 648 sample->id, event->lost_samples.lost, 649 event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : ""); 650 return 0; 651 } 652 653 int machine__process_aux_event(struct machine *machine __maybe_unused, 654 union perf_event *event) 655 { 656 if (dump_trace) 657 perf_event__fprintf_aux(event, stdout); 658 return 0; 659 } 660 661 int machine__process_itrace_start_event(struct machine *machine __maybe_unused, 662 union perf_event *event) 663 { 664 if (dump_trace) 665 perf_event__fprintf_itrace_start(event, stdout); 666 return 0; 667 } 668 669 int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused, 670 union perf_event *event) 671 { 672 if (dump_trace) 673 perf_event__fprintf_aux_output_hw_id(event, stdout); 674 return 0; 675 } 676 677 int machine__process_switch_event(struct machine *machine __maybe_unused, 678 union perf_event *event) 679 { 680 if (dump_trace) 681 perf_event__fprintf_switch(event, stdout); 682 return 0; 683 } 684 685 static int machine__process_ksymbol_register(struct machine *machine, 686 union perf_event *event, 687 struct perf_sample *sample __maybe_unused) 688 { 689 struct symbol *sym; 690 struct dso *dso = NULL; 691 struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); 692 int err = 0; 693 694 if (!map) { 695 dso = dso__new(event->ksymbol.name); 696 697 if (!dso) { 698 err = -ENOMEM; 699 goto out; 700 } 701 dso__set_kernel(dso, DSO_SPACE__KERNEL); 702 map = map__new2(0, dso); 703 if (!map) { 704 err = -ENOMEM; 705 goto out; 706 } 707 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) { 708 dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL); 709 dso__data(dso)->file_size = event->ksymbol.len; 710 dso__set_loaded(dso); 711 } 712 713 map__set_start(map, event->ksymbol.addr); 714 map__set_end(map, map__start(map) + event->ksymbol.len); 715 err = maps__insert(machine__kernel_maps(machine), map); 716 if (err) { 717 err = -ENOMEM; 718 goto out; 719 } 720 721 dso__set_loaded(dso); 722 723 if (is_bpf_image(event->ksymbol.name)) { 724 dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE); 725 dso__set_long_name(dso, "", false); 726 } 727 } else { 728 dso = dso__get(map__dso(map)); 729 } 730 731 sym = symbol__new(map__map_ip(map, map__start(map)), 732 event->ksymbol.len, 733 0, 0, event->ksymbol.name); 734 if (!sym) { 735 err = -ENOMEM; 736 goto out; 737 } 738 dso__insert_symbol(dso, sym); 739 out: 740 map__put(map); 741 dso__put(dso); 742 return err; 743 } 744 745 static int machine__process_ksymbol_unregister(struct machine *machine, 746 union perf_event *event, 747 struct perf_sample *sample __maybe_unused) 748 { 749 struct symbol *sym; 750 struct map *map; 751 752 map = 
maps__find(machine__kernel_maps(machine), event->ksymbol.addr); 753 if (!map) 754 return 0; 755 756 if (!RC_CHK_EQUAL(map, machine->vmlinux_map)) 757 maps__remove(machine__kernel_maps(machine), map); 758 else { 759 struct dso *dso = map__dso(map); 760 761 sym = dso__find_symbol(dso, map__map_ip(map, map__start(map))); 762 if (sym) 763 dso__delete_symbol(dso, sym); 764 } 765 map__put(map); 766 return 0; 767 } 768 769 int machine__process_ksymbol(struct machine *machine __maybe_unused, 770 union perf_event *event, 771 struct perf_sample *sample) 772 { 773 if (dump_trace) 774 perf_event__fprintf_ksymbol(event, stdout); 775 776 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER) 777 return machine__process_ksymbol_unregister(machine, event, 778 sample); 779 return machine__process_ksymbol_register(machine, event, sample); 780 } 781 782 int machine__process_text_poke(struct machine *machine, union perf_event *event, 783 struct perf_sample *sample __maybe_unused) 784 { 785 struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr); 786 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 787 struct dso *dso = map ? map__dso(map) : NULL; 788 789 if (dump_trace) 790 perf_event__fprintf_text_poke(event, machine, stdout); 791 792 if (!event->text_poke.new_len) 793 goto out; 794 795 if (cpumode != PERF_RECORD_MISC_KERNEL) { 796 pr_debug("%s: unsupported cpumode - ignoring\n", __func__); 797 goto out; 798 } 799 800 if (dso) { 801 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len; 802 int ret; 803 804 /* 805 * Kernel maps might be changed when loading symbols so loading 806 * must be done prior to using kernel maps. 807 */ 808 map__load(map); 809 ret = dso__data_write_cache_addr(dso, map, machine, 810 event->text_poke.addr, 811 new_bytes, 812 event->text_poke.new_len); 813 if (ret != event->text_poke.new_len) 814 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n", 815 event->text_poke.addr); 816 } else { 817 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n", 818 event->text_poke.addr); 819 } 820 out: 821 map__put(map); 822 return 0; 823 } 824 825 static struct map *machine__addnew_module_map(struct machine *machine, u64 start, 826 const char *filename) 827 { 828 struct map *map = NULL; 829 struct kmod_path m; 830 struct dso *dso; 831 int err; 832 833 if (kmod_path__parse_name(&m, filename)) 834 return NULL; 835 836 dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename); 837 if (dso == NULL) 838 goto out; 839 840 map = map__new2(start, dso); 841 if (map == NULL) 842 goto out; 843 844 err = maps__insert(machine__kernel_maps(machine), map); 845 /* If maps__insert failed, return NULL. 
*/ 846 if (err) { 847 map__put(map); 848 map = NULL; 849 } 850 out: 851 /* put the dso here, corresponding to machine__findnew_module_dso */ 852 dso__put(dso); 853 zfree(&m.name); 854 return map; 855 } 856 857 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) 858 { 859 struct rb_node *nd; 860 size_t ret = dsos__fprintf(&machines->host.dsos, fp); 861 862 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 863 struct machine *pos = rb_entry(nd, struct machine, rb_node); 864 ret += dsos__fprintf(&pos->dsos, fp); 865 } 866 867 return ret; 868 } 869 870 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp, 871 bool (skip)(struct dso *dso, int parm), int parm) 872 { 873 return dsos__fprintf_buildid(&m->dsos, fp, skip, parm); 874 } 875 876 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, 877 bool (skip)(struct dso *dso, int parm), int parm) 878 { 879 struct rb_node *nd; 880 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); 881 882 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 883 struct machine *pos = rb_entry(nd, struct machine, rb_node); 884 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); 885 } 886 return ret; 887 } 888 889 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) 890 { 891 int i; 892 size_t printed = 0; 893 struct dso *kdso = machine__kernel_dso(machine); 894 895 if (dso__has_build_id(kdso)) { 896 char filename[PATH_MAX]; 897 898 if (dso__build_id_filename(kdso, filename, sizeof(filename), false)) 899 printed += fprintf(fp, "[0] %s\n", filename); 900 } 901 902 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 903 printed += fprintf(fp, "[%d] %s\n", i + dso__has_build_id(kdso), 904 vmlinux_path[i]); 905 } 906 return printed; 907 } 908 909 struct machine_fprintf_cb_args { 910 FILE *fp; 911 size_t printed; 912 }; 913 914 static int machine_fprintf_cb(struct thread *thread, void *data) 915 { 916 struct machine_fprintf_cb_args *args = data; 917 918 /* TODO: handle fprintf errors. 
*/ 919 args->printed += thread__fprintf(thread, args->fp); 920 return 0; 921 } 922 923 size_t machine__fprintf(struct machine *machine, FILE *fp) 924 { 925 struct machine_fprintf_cb_args args = { 926 .fp = fp, 927 .printed = 0, 928 }; 929 size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads)); 930 931 machine__for_each_thread(machine, machine_fprintf_cb, &args); 932 return ret + args.printed; 933 } 934 935 static struct dso *machine__get_kernel(struct machine *machine) 936 { 937 const char *vmlinux_name = machine->mmap_name; 938 struct dso *kernel; 939 940 if (machine__is_host(machine)) { 941 if (symbol_conf.vmlinux_name) 942 vmlinux_name = symbol_conf.vmlinux_name; 943 944 kernel = machine__findnew_kernel(machine, vmlinux_name, 945 "[kernel]", DSO_SPACE__KERNEL); 946 } else { 947 if (symbol_conf.default_guest_vmlinux_name) 948 vmlinux_name = symbol_conf.default_guest_vmlinux_name; 949 950 kernel = machine__findnew_kernel(machine, vmlinux_name, 951 "[guest.kernel]", 952 DSO_SPACE__KERNEL_GUEST); 953 } 954 955 if (kernel != NULL && (!dso__has_build_id(kernel))) 956 dso__read_running_kernel_build_id(kernel, machine); 957 958 return kernel; 959 } 960 961 void machine__get_kallsyms_filename(struct machine *machine, char *buf, 962 size_t bufsz) 963 { 964 if (machine__is_default_guest(machine)) 965 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); 966 else 967 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 968 } 969 970 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; 971 972 /* Figure out the start address of kernel map from /proc/kallsyms. 973 * Returns the name of the start symbol in *symbol_name. Pass in NULL as 974 * symbol_name if it's not that important. 975 */ 976 static int machine__get_running_kernel_start(struct machine *machine, 977 const char **symbol_name, 978 u64 *start, u64 *end) 979 { 980 char filename[PATH_MAX]; 981 int i, err = -1; 982 const char *name; 983 u64 addr = 0; 984 985 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 986 987 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 988 return 0; 989 990 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { 991 err = kallsyms__get_function_start(filename, name, &addr); 992 if (!err) 993 break; 994 } 995 996 if (err) 997 return -1; 998 999 if (symbol_name) 1000 *symbol_name = name; 1001 1002 *start = addr; 1003 1004 err = kallsyms__get_symbol_start(filename, "_edata", &addr); 1005 if (err) 1006 err = kallsyms__get_function_start(filename, "_etext", &addr); 1007 if (!err) 1008 *end = addr; 1009 1010 return 0; 1011 } 1012 1013 int machine__create_extra_kernel_map(struct machine *machine, 1014 struct dso *kernel, 1015 struct extra_kernel_map *xm) 1016 { 1017 struct kmap *kmap; 1018 struct map *map; 1019 int err; 1020 1021 map = map__new2(xm->start, kernel); 1022 if (!map) 1023 return -ENOMEM; 1024 1025 map__set_end(map, xm->end); 1026 map__set_pgoff(map, xm->pgoff); 1027 1028 kmap = map__kmap(map); 1029 1030 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); 1031 1032 err = maps__insert(machine__kernel_maps(machine), map); 1033 1034 if (!err) { 1035 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n", 1036 kmap->name, map__start(map), map__end(map)); 1037 } 1038 1039 map__put(map); 1040 1041 return err; 1042 } 1043 1044 static u64 find_entry_trampoline(struct dso *dso) 1045 { 1046 /* Duplicates are removed so lookup all aliases */ 1047 const char *syms[] = { 1048 "_entry_trampoline", 1049 "__entry_trampoline_start", 1050 
"entry_SYSCALL_64_trampoline", 1051 }; 1052 struct symbol *sym = dso__first_symbol(dso); 1053 unsigned int i; 1054 1055 for (; sym; sym = dso__next_symbol(sym)) { 1056 if (sym->binding != STB_GLOBAL) 1057 continue; 1058 for (i = 0; i < ARRAY_SIZE(syms); i++) { 1059 if (!strcmp(sym->name, syms[i])) 1060 return sym->start; 1061 } 1062 } 1063 1064 return 0; 1065 } 1066 1067 /* 1068 * These values can be used for kernels that do not have symbols for the entry 1069 * trampolines in kallsyms. 1070 */ 1071 #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL 1072 #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000 1073 #define X86_64_ENTRY_TRAMPOLINE 0x6000 1074 1075 struct machine__map_x86_64_entry_trampolines_args { 1076 struct maps *kmaps; 1077 bool found; 1078 }; 1079 1080 static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data) 1081 { 1082 struct machine__map_x86_64_entry_trampolines_args *args = data; 1083 struct map *dest_map; 1084 struct kmap *kmap = __map__kmap(map); 1085 1086 if (!kmap || !is_entry_trampoline(kmap->name)) 1087 return 0; 1088 1089 dest_map = maps__find(args->kmaps, map__pgoff(map)); 1090 if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map)) 1091 map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map))); 1092 1093 map__put(dest_map); 1094 args->found = true; 1095 return 0; 1096 } 1097 1098 /* Map x86_64 PTI entry trampolines */ 1099 int machine__map_x86_64_entry_trampolines(struct machine *machine, 1100 struct dso *kernel) 1101 { 1102 struct machine__map_x86_64_entry_trampolines_args args = { 1103 .kmaps = machine__kernel_maps(machine), 1104 .found = false, 1105 }; 1106 int nr_cpus_avail, cpu; 1107 u64 pgoff; 1108 1109 /* 1110 * In the vmlinux case, pgoff is a virtual address which must now be 1111 * mapped to a vmlinux offset. 
1112 */ 1113 maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args); 1114 1115 if (args.found || machine->trampolines_mapped) 1116 return 0; 1117 1118 pgoff = find_entry_trampoline(kernel); 1119 if (!pgoff) 1120 return 0; 1121 1122 nr_cpus_avail = machine__nr_cpus_avail(machine); 1123 1124 /* Add a 1 page map for each CPU's entry trampoline */ 1125 for (cpu = 0; cpu < nr_cpus_avail; cpu++) { 1126 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU + 1127 cpu * X86_64_CPU_ENTRY_AREA_SIZE + 1128 X86_64_ENTRY_TRAMPOLINE; 1129 struct extra_kernel_map xm = { 1130 .start = va, 1131 .end = va + page_size, 1132 .pgoff = pgoff, 1133 }; 1134 1135 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN); 1136 1137 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0) 1138 return -1; 1139 } 1140 1141 machine->trampolines_mapped = nr_cpus_avail; 1142 1143 return 0; 1144 } 1145 1146 int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused, 1147 struct dso *kernel __maybe_unused) 1148 { 1149 return 0; 1150 } 1151 1152 static int 1153 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 1154 { 1155 /* In case of renewal the kernel map, destroy previous one */ 1156 machine__destroy_kernel_maps(machine); 1157 1158 map__put(machine->vmlinux_map); 1159 machine->vmlinux_map = map__new2(0, kernel); 1160 if (machine->vmlinux_map == NULL) 1161 return -ENOMEM; 1162 1163 map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY); 1164 return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map); 1165 } 1166 1167 void machine__destroy_kernel_maps(struct machine *machine) 1168 { 1169 struct kmap *kmap; 1170 struct map *map = machine__kernel_map(machine); 1171 1172 if (map == NULL) 1173 return; 1174 1175 kmap = map__kmap(map); 1176 maps__remove(machine__kernel_maps(machine), map); 1177 if (kmap && kmap->ref_reloc_sym) { 1178 zfree((char **)&kmap->ref_reloc_sym->name); 1179 zfree(&kmap->ref_reloc_sym); 1180 } 1181 1182 map__zput(machine->vmlinux_map); 1183 } 1184 1185 int machines__create_guest_kernel_maps(struct machines *machines) 1186 { 1187 int ret = 0; 1188 struct dirent **namelist = NULL; 1189 int i, items = 0; 1190 char path[PATH_MAX]; 1191 pid_t pid; 1192 char *endp; 1193 1194 if (symbol_conf.default_guest_vmlinux_name || 1195 symbol_conf.default_guest_modules || 1196 symbol_conf.default_guest_kallsyms) { 1197 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); 1198 } 1199 1200 if (symbol_conf.guestmount) { 1201 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); 1202 if (items <= 0) 1203 return -ENOENT; 1204 for (i = 0; i < items; i++) { 1205 if (!isdigit(namelist[i]->d_name[0])) { 1206 /* Filter out . and .. */ 1207 continue; 1208 } 1209 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); 1210 if ((*endp != '\0') || 1211 (endp == namelist[i]->d_name) || 1212 (errno == ERANGE)) { 1213 pr_debug("invalid directory (%s). 
Skipping.\n", 1214 namelist[i]->d_name); 1215 continue; 1216 } 1217 sprintf(path, "%s/%s/proc/kallsyms", 1218 symbol_conf.guestmount, 1219 namelist[i]->d_name); 1220 ret = access(path, R_OK); 1221 if (ret) { 1222 pr_debug("Can't access file %s\n", path); 1223 goto failure; 1224 } 1225 machines__create_kernel_maps(machines, pid); 1226 } 1227 failure: 1228 free(namelist); 1229 } 1230 1231 return ret; 1232 } 1233 1234 void machines__destroy_kernel_maps(struct machines *machines) 1235 { 1236 struct rb_node *next = rb_first_cached(&machines->guests); 1237 1238 machine__destroy_kernel_maps(&machines->host); 1239 1240 while (next) { 1241 struct machine *pos = rb_entry(next, struct machine, rb_node); 1242 1243 next = rb_next(&pos->rb_node); 1244 rb_erase_cached(&pos->rb_node, &machines->guests); 1245 machine__delete(pos); 1246 } 1247 } 1248 1249 int machines__create_kernel_maps(struct machines *machines, pid_t pid) 1250 { 1251 struct machine *machine = machines__findnew(machines, pid); 1252 1253 if (machine == NULL) 1254 return -1; 1255 1256 return machine__create_kernel_maps(machine); 1257 } 1258 1259 int machine__load_kallsyms(struct machine *machine, const char *filename) 1260 { 1261 struct map *map = machine__kernel_map(machine); 1262 struct dso *dso = map__dso(map); 1263 int ret = __dso__load_kallsyms(dso, filename, map, true); 1264 1265 if (ret > 0) { 1266 dso__set_loaded(dso); 1267 /* 1268 * Since /proc/kallsyms will have multiple sessions for the 1269 * kernel, with modules between them, fixup the end of all 1270 * sections. 1271 */ 1272 maps__fixup_end(machine__kernel_maps(machine)); 1273 } 1274 1275 return ret; 1276 } 1277 1278 int machine__load_vmlinux_path(struct machine *machine) 1279 { 1280 struct map *map = machine__kernel_map(machine); 1281 struct dso *dso = map__dso(map); 1282 int ret = dso__load_vmlinux_path(dso, map); 1283 1284 if (ret > 0) 1285 dso__set_loaded(dso); 1286 1287 return ret; 1288 } 1289 1290 static char *get_kernel_version(const char *root_dir) 1291 { 1292 char version[PATH_MAX]; 1293 FILE *file; 1294 char *name, *tmp; 1295 const char *prefix = "Linux version "; 1296 1297 sprintf(version, "%s/proc/version", root_dir); 1298 file = fopen(version, "r"); 1299 if (!file) 1300 return NULL; 1301 1302 tmp = fgets(version, sizeof(version), file); 1303 fclose(file); 1304 if (!tmp) 1305 return NULL; 1306 1307 name = strstr(version, prefix); 1308 if (!name) 1309 return NULL; 1310 name += strlen(prefix); 1311 tmp = strchr(name, ' '); 1312 if (tmp) 1313 *tmp = '\0'; 1314 1315 return strdup(name); 1316 } 1317 1318 static bool is_kmod_dso(struct dso *dso) 1319 { 1320 return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 1321 dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE; 1322 } 1323 1324 static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m) 1325 { 1326 char *long_name; 1327 struct dso *dso; 1328 struct map *map = maps__find_by_name(maps, m->name); 1329 1330 if (map == NULL) 1331 return 0; 1332 1333 long_name = strdup(path); 1334 if (long_name == NULL) { 1335 map__put(map); 1336 return -ENOMEM; 1337 } 1338 1339 dso = map__dso(map); 1340 dso__set_long_name(dso, long_name, true); 1341 dso__kernel_module_get_build_id(dso, ""); 1342 1343 /* 1344 * Full name could reveal us kmod compression, so 1345 * we need to update the symtab_type if needed. 
1346 */ 1347 if (m->comp && is_kmod_dso(dso)) { 1348 dso__set_symtab_type(dso, dso__symtab_type(dso)+1); 1349 dso__set_comp(dso, m->comp); 1350 } 1351 map__put(map); 1352 return 0; 1353 } 1354 1355 static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth) 1356 { 1357 struct dirent *dent; 1358 DIR *dir = opendir(dir_name); 1359 int ret = 0; 1360 1361 if (!dir) { 1362 pr_debug("%s: cannot open %s dir\n", __func__, dir_name); 1363 return -1; 1364 } 1365 1366 while ((dent = readdir(dir)) != NULL) { 1367 char path[PATH_MAX]; 1368 struct stat st; 1369 1370 /*sshfs might return bad dent->d_type, so we have to stat*/ 1371 path__join(path, sizeof(path), dir_name, dent->d_name); 1372 if (stat(path, &st)) 1373 continue; 1374 1375 if (S_ISDIR(st.st_mode)) { 1376 if (!strcmp(dent->d_name, ".") || 1377 !strcmp(dent->d_name, "..")) 1378 continue; 1379 1380 /* Do not follow top-level source and build symlinks */ 1381 if (depth == 0) { 1382 if (!strcmp(dent->d_name, "source") || 1383 !strcmp(dent->d_name, "build")) 1384 continue; 1385 } 1386 1387 ret = maps__set_modules_path_dir(maps, path, depth + 1); 1388 if (ret < 0) 1389 goto out; 1390 } else { 1391 struct kmod_path m; 1392 1393 ret = kmod_path__parse_name(&m, dent->d_name); 1394 if (ret) 1395 goto out; 1396 1397 if (m.kmod) 1398 ret = maps__set_module_path(maps, path, &m); 1399 1400 zfree(&m.name); 1401 1402 if (ret) 1403 goto out; 1404 } 1405 } 1406 1407 out: 1408 closedir(dir); 1409 return ret; 1410 } 1411 1412 static int machine__set_modules_path(struct machine *machine) 1413 { 1414 char *version; 1415 char modules_path[PATH_MAX]; 1416 1417 version = get_kernel_version(machine->root_dir); 1418 if (!version) 1419 return -1; 1420 1421 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", 1422 machine->root_dir, version); 1423 free(version); 1424 1425 return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0); 1426 } 1427 int __weak arch__fix_module_text_start(u64 *start __maybe_unused, 1428 u64 *size __maybe_unused, 1429 const char *name __maybe_unused) 1430 { 1431 return 0; 1432 } 1433 1434 static int machine__create_module(void *arg, const char *name, u64 start, 1435 u64 size) 1436 { 1437 struct machine *machine = arg; 1438 struct map *map; 1439 1440 if (arch__fix_module_text_start(&start, &size, name) < 0) 1441 return -1; 1442 1443 map = machine__addnew_module_map(machine, start, name); 1444 if (map == NULL) 1445 return -1; 1446 map__set_end(map, start + size); 1447 1448 dso__kernel_module_get_build_id(map__dso(map), machine->root_dir); 1449 map__put(map); 1450 return 0; 1451 } 1452 1453 static int machine__create_modules(struct machine *machine) 1454 { 1455 const char *modules; 1456 char path[PATH_MAX]; 1457 1458 if (machine__is_default_guest(machine)) { 1459 modules = symbol_conf.default_guest_modules; 1460 } else { 1461 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir); 1462 modules = path; 1463 } 1464 1465 if (symbol__restricted_filename(modules, "/proc/modules")) 1466 return -1; 1467 1468 if (modules__parse(modules, machine, machine__create_module)) 1469 return -1; 1470 1471 if (!machine__set_modules_path(machine)) 1472 return 0; 1473 1474 pr_debug("Problems setting modules path maps, continuing anyway...\n"); 1475 1476 return 0; 1477 } 1478 1479 static void machine__set_kernel_mmap(struct machine *machine, 1480 u64 start, u64 end) 1481 { 1482 map__set_start(machine->vmlinux_map, start); 1483 map__set_end(machine->vmlinux_map, end); 1484 /* 1485 * Be a bit 
paranoid here, some perf.data file came with 1486 * a zero sized synthesized MMAP event for the kernel. 1487 */ 1488 if (start == 0 && end == 0) 1489 map__set_end(machine->vmlinux_map, ~0ULL); 1490 } 1491 1492 static int machine__update_kernel_mmap(struct machine *machine, 1493 u64 start, u64 end) 1494 { 1495 struct map *orig, *updated; 1496 int err; 1497 1498 orig = machine->vmlinux_map; 1499 updated = map__get(orig); 1500 1501 machine->vmlinux_map = updated; 1502 maps__remove(machine__kernel_maps(machine), orig); 1503 machine__set_kernel_mmap(machine, start, end); 1504 err = maps__insert(machine__kernel_maps(machine), updated); 1505 map__put(orig); 1506 1507 return err; 1508 } 1509 1510 int machine__create_kernel_maps(struct machine *machine) 1511 { 1512 struct dso *kernel = machine__get_kernel(machine); 1513 const char *name = NULL; 1514 u64 start = 0, end = ~0ULL; 1515 int ret; 1516 1517 if (kernel == NULL) 1518 return -1; 1519 1520 ret = __machine__create_kernel_maps(machine, kernel); 1521 if (ret < 0) 1522 goto out_put; 1523 1524 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { 1525 if (machine__is_host(machine)) 1526 pr_debug("Problems creating module maps, " 1527 "continuing anyway...\n"); 1528 else 1529 pr_debug("Problems creating module maps for guest %d, " 1530 "continuing anyway...\n", machine->pid); 1531 } 1532 1533 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) { 1534 if (name && 1535 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) { 1536 machine__destroy_kernel_maps(machine); 1537 ret = -1; 1538 goto out_put; 1539 } 1540 1541 /* 1542 * we have a real start address now, so re-order the kmaps 1543 * assume it's the last in the kmaps 1544 */ 1545 ret = machine__update_kernel_mmap(machine, start, end); 1546 if (ret < 0) 1547 goto out_put; 1548 } 1549 1550 if (machine__create_extra_kernel_maps(machine, kernel)) 1551 pr_debug("Problems creating extra kernel maps, continuing anyway...\n"); 1552 1553 if (end == ~0ULL) { 1554 /* update end address of the kernel map using adjacent module address */ 1555 struct map *next = maps__find_next_entry(machine__kernel_maps(machine), 1556 machine__kernel_map(machine)); 1557 1558 if (next) { 1559 machine__set_kernel_mmap(machine, start, map__start(next)); 1560 map__put(next); 1561 } 1562 } 1563 1564 out_put: 1565 dso__put(kernel); 1566 return ret; 1567 } 1568 1569 static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused) 1570 { 1571 return dso__is_kcore(dso) ? 1 : 0; 1572 } 1573 1574 static bool machine__uses_kcore(struct machine *machine) 1575 { 1576 return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? 
true : false; 1577 } 1578 1579 static bool perf_event__is_extra_kernel_mmap(struct machine *machine, 1580 struct extra_kernel_map *xm) 1581 { 1582 return machine__is(machine, "x86_64") && 1583 is_entry_trampoline(xm->name); 1584 } 1585 1586 static int machine__process_extra_kernel_map(struct machine *machine, 1587 struct extra_kernel_map *xm) 1588 { 1589 struct dso *kernel = machine__kernel_dso(machine); 1590 1591 if (kernel == NULL) 1592 return -1; 1593 1594 return machine__create_extra_kernel_map(machine, kernel, xm); 1595 } 1596 1597 static int machine__process_kernel_mmap_event(struct machine *machine, 1598 struct extra_kernel_map *xm, 1599 struct build_id *bid) 1600 { 1601 enum dso_space_type dso_space; 1602 bool is_kernel_mmap; 1603 const char *mmap_name = machine->mmap_name; 1604 1605 /* If we have maps from kcore then we do not need or want any others */ 1606 if (machine__uses_kcore(machine)) 1607 return 0; 1608 1609 if (machine__is_host(machine)) 1610 dso_space = DSO_SPACE__KERNEL; 1611 else 1612 dso_space = DSO_SPACE__KERNEL_GUEST; 1613 1614 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; 1615 if (!is_kernel_mmap && !machine__is_host(machine)) { 1616 /* 1617 * If the event was recorded inside the guest and injected into 1618 * the host perf.data file, then it will match a host mmap_name, 1619 * so try that - see machine__set_mmap_name(). 1620 */ 1621 mmap_name = "[kernel.kallsyms]"; 1622 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; 1623 } 1624 if (xm->name[0] == '/' || 1625 (!is_kernel_mmap && xm->name[0] == '[')) { 1626 struct map *map = machine__addnew_module_map(machine, xm->start, xm->name); 1627 1628 if (map == NULL) 1629 goto out_problem; 1630 1631 map__set_end(map, map__start(map) + xm->end - xm->start); 1632 1633 if (build_id__is_defined(bid)) 1634 dso__set_build_id(map__dso(map), bid); 1635 1636 map__put(map); 1637 } else if (is_kernel_mmap) { 1638 const char *symbol_name = xm->name + strlen(mmap_name); 1639 /* 1640 * Should be there already, from the build-id table in 1641 * the header. 1642 */ 1643 struct dso *kernel = dsos__find_kernel_dso(&machine->dsos); 1644 1645 if (kernel == NULL) 1646 kernel = machine__findnew_dso(machine, machine->mmap_name); 1647 if (kernel == NULL) 1648 goto out_problem; 1649 1650 dso__set_kernel(kernel, dso_space); 1651 if (__machine__create_kernel_maps(machine, kernel) < 0) { 1652 dso__put(kernel); 1653 goto out_problem; 1654 } 1655 1656 if (strstr(dso__long_name(kernel), "vmlinux")) 1657 dso__set_short_name(kernel, "[kernel.vmlinux]", false); 1658 1659 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) { 1660 dso__put(kernel); 1661 goto out_problem; 1662 } 1663 1664 if (build_id__is_defined(bid)) 1665 dso__set_build_id(kernel, bid); 1666 1667 /* 1668 * Avoid using a zero address (kptr_restrict) for the ref reloc 1669 * symbol. Effectively having zero here means that at record 1670 * time /proc/sys/kernel/kptr_restrict was non zero. 
1671 */ 1672 if (xm->pgoff != 0) { 1673 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, 1674 symbol_name, 1675 xm->pgoff); 1676 } 1677 1678 if (machine__is_default_guest(machine)) { 1679 /* 1680 * preload dso of guest kernel and modules 1681 */ 1682 dso__load(kernel, machine__kernel_map(machine)); 1683 } 1684 dso__put(kernel); 1685 } else if (perf_event__is_extra_kernel_mmap(machine, xm)) { 1686 return machine__process_extra_kernel_map(machine, xm); 1687 } 1688 return 0; 1689 out_problem: 1690 return -1; 1691 } 1692 1693 int machine__process_mmap2_event(struct machine *machine, 1694 union perf_event *event, 1695 struct perf_sample *sample) 1696 { 1697 struct thread *thread; 1698 struct map *map; 1699 struct dso_id dso_id = { 1700 .maj = event->mmap2.maj, 1701 .min = event->mmap2.min, 1702 .ino = event->mmap2.ino, 1703 .ino_generation = event->mmap2.ino_generation, 1704 }; 1705 struct build_id __bid, *bid = NULL; 1706 int ret = 0; 1707 1708 if (dump_trace) 1709 perf_event__fprintf_mmap2(event, stdout); 1710 1711 if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) { 1712 bid = &__bid; 1713 build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size); 1714 } 1715 1716 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1717 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1718 struct extra_kernel_map xm = { 1719 .start = event->mmap2.start, 1720 .end = event->mmap2.start + event->mmap2.len, 1721 .pgoff = event->mmap2.pgoff, 1722 }; 1723 1724 strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN); 1725 ret = machine__process_kernel_mmap_event(machine, &xm, bid); 1726 if (ret < 0) 1727 goto out_problem; 1728 return 0; 1729 } 1730 1731 thread = machine__findnew_thread(machine, event->mmap2.pid, 1732 event->mmap2.tid); 1733 if (thread == NULL) 1734 goto out_problem; 1735 1736 map = map__new(machine, event->mmap2.start, 1737 event->mmap2.len, event->mmap2.pgoff, 1738 &dso_id, event->mmap2.prot, 1739 event->mmap2.flags, bid, 1740 event->mmap2.filename, thread); 1741 1742 if (map == NULL) 1743 goto out_problem_map; 1744 1745 ret = thread__insert_map(thread, map); 1746 if (ret) 1747 goto out_problem_insert; 1748 1749 thread__put(thread); 1750 map__put(map); 1751 return 0; 1752 1753 out_problem_insert: 1754 map__put(map); 1755 out_problem_map: 1756 thread__put(thread); 1757 out_problem: 1758 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); 1759 return 0; 1760 } 1761 1762 int machine__process_mmap_event(struct machine *machine, union perf_event *event, 1763 struct perf_sample *sample) 1764 { 1765 struct thread *thread; 1766 struct map *map; 1767 u32 prot = 0; 1768 int ret = 0; 1769 1770 if (dump_trace) 1771 perf_event__fprintf_mmap(event, stdout); 1772 1773 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1774 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1775 struct extra_kernel_map xm = { 1776 .start = event->mmap.start, 1777 .end = event->mmap.start + event->mmap.len, 1778 .pgoff = event->mmap.pgoff, 1779 }; 1780 1781 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN); 1782 ret = machine__process_kernel_mmap_event(machine, &xm, NULL); 1783 if (ret < 0) 1784 goto out_problem; 1785 return 0; 1786 } 1787 1788 thread = machine__findnew_thread(machine, event->mmap.pid, 1789 event->mmap.tid); 1790 if (thread == NULL) 1791 goto out_problem; 1792 1793 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA)) 1794 prot = PROT_EXEC; 1795 1796 map = map__new(machine, event->mmap.start, 1797 event->mmap.len, event->mmap.pgoff, 1798 NULL, prot, 0, NULL, 
event->mmap.filename, thread); 1799 1800 if (map == NULL) 1801 goto out_problem_map; 1802 1803 ret = thread__insert_map(thread, map); 1804 if (ret) 1805 goto out_problem_insert; 1806 1807 thread__put(thread); 1808 map__put(map); 1809 return 0; 1810 1811 out_problem_insert: 1812 map__put(map); 1813 out_problem_map: 1814 thread__put(thread); 1815 out_problem: 1816 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); 1817 return 0; 1818 } 1819 1820 void machine__remove_thread(struct machine *machine, struct thread *th) 1821 { 1822 return threads__remove(&machine->threads, th); 1823 } 1824 1825 int machine__process_fork_event(struct machine *machine, union perf_event *event, 1826 struct perf_sample *sample) 1827 { 1828 struct thread *thread = machine__find_thread(machine, 1829 event->fork.pid, 1830 event->fork.tid); 1831 struct thread *parent = machine__findnew_thread(machine, 1832 event->fork.ppid, 1833 event->fork.ptid); 1834 bool do_maps_clone = true; 1835 int err = 0; 1836 1837 if (dump_trace) 1838 perf_event__fprintf_task(event, stdout); 1839 1840 /* 1841 * There may be an existing thread that is not actually the parent, 1842 * either because we are processing events out of order, or because the 1843 * (fork) event that would have removed the thread was lost. Assume the 1844 * latter case and continue on as best we can. 1845 */ 1846 if (thread__pid(parent) != (pid_t)event->fork.ppid) { 1847 dump_printf("removing erroneous parent thread %d/%d\n", 1848 thread__pid(parent), thread__tid(parent)); 1849 machine__remove_thread(machine, parent); 1850 thread__put(parent); 1851 parent = machine__findnew_thread(machine, event->fork.ppid, 1852 event->fork.ptid); 1853 } 1854 1855 /* if a thread currently exists for the thread id remove it */ 1856 if (thread != NULL) { 1857 machine__remove_thread(machine, thread); 1858 thread__put(thread); 1859 } 1860 1861 thread = machine__findnew_thread(machine, event->fork.pid, 1862 event->fork.tid); 1863 /* 1864 * When synthesizing FORK events, we are trying to create thread 1865 * objects for the already running tasks on the machine. 1866 * 1867 * Normally, for a kernel FORK event, we want to clone the parent's 1868 * maps because that is what the kernel just did. 1869 * 1870 * But when synthesizing, this should not be done. If we do, we end up 1871 * with overlapping maps as we process the synthesized MMAP2 events that 1872 * get delivered shortly thereafter. 1873 * 1874 * Use the FORK event misc flags in an internal way to signal this 1875 * situation, so we can elide the map clone when appropriate. 
1876 */ 1877 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC) 1878 do_maps_clone = false; 1879 1880 if (thread == NULL || parent == NULL || 1881 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) { 1882 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); 1883 err = -1; 1884 } 1885 thread__put(thread); 1886 thread__put(parent); 1887 1888 return err; 1889 } 1890 1891 int machine__process_exit_event(struct machine *machine, union perf_event *event, 1892 struct perf_sample *sample __maybe_unused) 1893 { 1894 struct thread *thread = machine__find_thread(machine, 1895 event->fork.pid, 1896 event->fork.tid); 1897 1898 if (dump_trace) 1899 perf_event__fprintf_task(event, stdout); 1900 1901 if (thread != NULL) { 1902 if (symbol_conf.keep_exited_threads) 1903 thread__set_exited(thread, /*exited=*/true); 1904 else 1905 machine__remove_thread(machine, thread); 1906 } 1907 thread__put(thread); 1908 return 0; 1909 } 1910 1911 int machine__process_event(struct machine *machine, union perf_event *event, 1912 struct perf_sample *sample) 1913 { 1914 int ret; 1915 1916 switch (event->header.type) { 1917 case PERF_RECORD_COMM: 1918 ret = machine__process_comm_event(machine, event, sample); break; 1919 case PERF_RECORD_MMAP: 1920 ret = machine__process_mmap_event(machine, event, sample); break; 1921 case PERF_RECORD_NAMESPACES: 1922 ret = machine__process_namespaces_event(machine, event, sample); break; 1923 case PERF_RECORD_CGROUP: 1924 ret = machine__process_cgroup_event(machine, event, sample); break; 1925 case PERF_RECORD_MMAP2: 1926 ret = machine__process_mmap2_event(machine, event, sample); break; 1927 case PERF_RECORD_FORK: 1928 ret = machine__process_fork_event(machine, event, sample); break; 1929 case PERF_RECORD_EXIT: 1930 ret = machine__process_exit_event(machine, event, sample); break; 1931 case PERF_RECORD_LOST: 1932 ret = machine__process_lost_event(machine, event, sample); break; 1933 case PERF_RECORD_AUX: 1934 ret = machine__process_aux_event(machine, event); break; 1935 case PERF_RECORD_ITRACE_START: 1936 ret = machine__process_itrace_start_event(machine, event); break; 1937 case PERF_RECORD_LOST_SAMPLES: 1938 ret = machine__process_lost_samples_event(machine, event, sample); break; 1939 case PERF_RECORD_SWITCH: 1940 case PERF_RECORD_SWITCH_CPU_WIDE: 1941 ret = machine__process_switch_event(machine, event); break; 1942 case PERF_RECORD_KSYMBOL: 1943 ret = machine__process_ksymbol(machine, event, sample); break; 1944 case PERF_RECORD_BPF_EVENT: 1945 ret = machine__process_bpf(machine, event, sample); break; 1946 case PERF_RECORD_TEXT_POKE: 1947 ret = machine__process_text_poke(machine, event, sample); break; 1948 case PERF_RECORD_AUX_OUTPUT_HW_ID: 1949 ret = machine__process_aux_output_hw_id_event(machine, event); break; 1950 default: 1951 ret = -1; 1952 break; 1953 } 1954 1955 return ret; 1956 } 1957 1958 static bool symbol__match_regex(struct symbol *sym, regex_t *regex) 1959 { 1960 return regexec(regex, sym->name, 0, NULL, 0) == 0; 1961 } 1962 1963 static void ip__resolve_ams(struct thread *thread, 1964 struct addr_map_symbol *ams, 1965 u64 ip) 1966 { 1967 struct addr_location al; 1968 1969 addr_location__init(&al); 1970 /* 1971 * We cannot use the header.misc hint to determine whether a 1972 * branch stack address is user, kernel, guest, hypervisor. 1973 * Branches may straddle the kernel/user/hypervisor boundaries. 
1974 * Thus, we have to try consecutively until we find a match 1975 * or else, the symbol is unknown 1976 */ 1977 thread__find_cpumode_addr_location(thread, ip, &al); 1978 1979 ams->addr = ip; 1980 ams->al_addr = al.addr; 1981 ams->al_level = al.level; 1982 ams->ms.maps = maps__get(al.maps); 1983 ams->ms.sym = al.sym; 1984 ams->ms.map = map__get(al.map); 1985 ams->phys_addr = 0; 1986 ams->data_page_size = 0; 1987 addr_location__exit(&al); 1988 } 1989 1990 static void ip__resolve_data(struct thread *thread, 1991 u8 m, struct addr_map_symbol *ams, 1992 u64 addr, u64 phys_addr, u64 daddr_page_size) 1993 { 1994 struct addr_location al; 1995 1996 addr_location__init(&al); 1997 1998 thread__find_symbol(thread, m, addr, &al); 1999 2000 ams->addr = addr; 2001 ams->al_addr = al.addr; 2002 ams->al_level = al.level; 2003 ams->ms.maps = maps__get(al.maps); 2004 ams->ms.sym = al.sym; 2005 ams->ms.map = map__get(al.map); 2006 ams->phys_addr = phys_addr; 2007 ams->data_page_size = daddr_page_size; 2008 addr_location__exit(&al); 2009 } 2010 2011 struct mem_info *sample__resolve_mem(struct perf_sample *sample, 2012 struct addr_location *al) 2013 { 2014 struct mem_info *mi = mem_info__new(); 2015 2016 if (!mi) 2017 return NULL; 2018 2019 ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip); 2020 ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi), 2021 sample->addr, sample->phys_addr, 2022 sample->data_page_size); 2023 mem_info__data_src(mi)->val = sample->data_src; 2024 2025 return mi; 2026 } 2027 2028 static char *callchain_srcline(struct map_symbol *ms, u64 ip) 2029 { 2030 struct map *map = ms->map; 2031 char *srcline = NULL; 2032 struct dso *dso; 2033 2034 if (!map || callchain_param.key == CCKEY_FUNCTION) 2035 return srcline; 2036 2037 dso = map__dso(map); 2038 srcline = srcline__tree_find(dso__srclines(dso), ip); 2039 if (!srcline) { 2040 bool show_sym = false; 2041 bool show_addr = callchain_param.key == CCKEY_ADDRESS; 2042 2043 srcline = get_srcline(dso, map__rip_2objdump(map, ip), 2044 ms->sym, show_sym, show_addr, ip); 2045 srcline__tree_insert(dso__srclines(dso), ip, srcline); 2046 } 2047 2048 return srcline; 2049 } 2050 2051 struct iterations { 2052 int nr_loop_iter; 2053 u64 cycles; 2054 }; 2055 2056 static int add_callchain_ip(struct thread *thread, 2057 struct callchain_cursor *cursor, 2058 struct symbol **parent, 2059 struct addr_location *root_al, 2060 u8 *cpumode, 2061 u64 ip, 2062 bool branch, 2063 struct branch_flags *flags, 2064 struct iterations *iter, 2065 u64 branch_from, 2066 bool symbols) 2067 { 2068 struct map_symbol ms = {}; 2069 struct addr_location al; 2070 int nr_loop_iter = 0, err = 0; 2071 u64 iter_cycles = 0; 2072 const char *srcline = NULL; 2073 2074 addr_location__init(&al); 2075 al.filtered = 0; 2076 al.sym = NULL; 2077 al.srcline = NULL; 2078 if (!cpumode) { 2079 thread__find_cpumode_addr_location(thread, ip, &al); 2080 } else { 2081 if (ip >= PERF_CONTEXT_MAX) { 2082 switch (ip) { 2083 case PERF_CONTEXT_HV: 2084 *cpumode = PERF_RECORD_MISC_HYPERVISOR; 2085 break; 2086 case PERF_CONTEXT_KERNEL: 2087 *cpumode = PERF_RECORD_MISC_KERNEL; 2088 break; 2089 case PERF_CONTEXT_USER: 2090 *cpumode = PERF_RECORD_MISC_USER; 2091 break; 2092 default: 2093 pr_debug("invalid callchain context: " 2094 "%"PRId64"\n", (s64) ip); 2095 /* 2096 * It seems the callchain is corrupted. 2097 * Discard all. 
2098 */ 2099 callchain_cursor_reset(cursor); 2100 err = 1; 2101 goto out; 2102 } 2103 goto out; 2104 } 2105 if (symbols) 2106 thread__find_symbol(thread, *cpumode, ip, &al); 2107 } 2108 2109 if (al.sym != NULL) { 2110 if (perf_hpp_list.parent && !*parent && 2111 symbol__match_regex(al.sym, &parent_regex)) 2112 *parent = al.sym; 2113 else if (have_ignore_callees && root_al && 2114 symbol__match_regex(al.sym, &ignore_callees_regex)) { 2115 /* Treat this symbol as the root, 2116 forgetting its callees. */ 2117 addr_location__copy(root_al, &al); 2118 callchain_cursor_reset(cursor); 2119 } 2120 } 2121 2122 if (symbol_conf.hide_unresolved && al.sym == NULL) 2123 goto out; 2124 2125 if (iter) { 2126 nr_loop_iter = iter->nr_loop_iter; 2127 iter_cycles = iter->cycles; 2128 } 2129 2130 ms.maps = maps__get(al.maps); 2131 ms.map = map__get(al.map); 2132 ms.sym = al.sym; 2133 srcline = callchain_srcline(&ms, al.addr); 2134 err = callchain_cursor_append(cursor, ip, &ms, 2135 branch, flags, nr_loop_iter, 2136 iter_cycles, branch_from, srcline); 2137 out: 2138 addr_location__exit(&al); 2139 map_symbol__exit(&ms); 2140 return err; 2141 } 2142 2143 struct branch_info *sample__resolve_bstack(struct perf_sample *sample, 2144 struct addr_location *al) 2145 { 2146 unsigned int i; 2147 const struct branch_stack *bs = sample->branch_stack; 2148 struct branch_entry *entries = perf_sample__branch_entries(sample); 2149 u64 *branch_stack_cntr = sample->branch_stack_cntr; 2150 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info)); 2151 2152 if (!bi) 2153 return NULL; 2154 2155 for (i = 0; i < bs->nr; i++) { 2156 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to); 2157 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from); 2158 bi[i].flags = entries[i].flags; 2159 if (branch_stack_cntr) 2160 bi[i].branch_stack_cntr = branch_stack_cntr[i]; 2161 } 2162 return bi; 2163 } 2164 2165 static void save_iterations(struct iterations *iter, 2166 struct branch_entry *be, int nr) 2167 { 2168 int i; 2169 2170 iter->nr_loop_iter++; 2171 iter->cycles = 0; 2172 2173 for (i = 0; i < nr; i++) 2174 iter->cycles += be[i].flags.cycles; 2175 } 2176 2177 #define CHASHSZ 127 2178 #define CHASHBITS 7 2179 #define NO_ENTRY 0xff 2180 2181 #define PERF_MAX_BRANCH_DEPTH 127 2182 2183 /* Remove loops. 
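 * Detect a repeating block of branch entries by hashing the 'from'
 * addresses: when the block between two occurrences of the same 'from'
 * matches the block that follows it, the duplicate block is dropped and its
 * iteration count and cycle totals are recorded in 'iter'.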
*/ 2184 static int remove_loops(struct branch_entry *l, int nr, 2185 struct iterations *iter) 2186 { 2187 int i, j, off; 2188 unsigned char chash[CHASHSZ]; 2189 2190 memset(chash, NO_ENTRY, sizeof(chash)); 2191 2192 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255); 2193 2194 for (i = 0; i < nr; i++) { 2195 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ; 2196 2197 /* no collision handling for now */ 2198 if (chash[h] == NO_ENTRY) { 2199 chash[h] = i; 2200 } else if (l[chash[h]].from == l[i].from) { 2201 bool is_loop = true; 2202 /* check if it is a real loop */ 2203 off = 0; 2204 for (j = chash[h]; j < i && i + off < nr; j++, off++) 2205 if (l[j].from != l[i + off].from) { 2206 is_loop = false; 2207 break; 2208 } 2209 if (is_loop) { 2210 j = nr - (i + off); 2211 if (j > 0) { 2212 save_iterations(iter + i + off, 2213 l + i, off); 2214 2215 memmove(iter + i, iter + i + off, 2216 j * sizeof(*iter)); 2217 2218 memmove(l + i, l + i + off, 2219 j * sizeof(*l)); 2220 } 2221 2222 nr -= off; 2223 } 2224 } 2225 } 2226 return nr; 2227 } 2228 2229 static int lbr_callchain_add_kernel_ip(struct thread *thread, 2230 struct callchain_cursor *cursor, 2231 struct perf_sample *sample, 2232 struct symbol **parent, 2233 struct addr_location *root_al, 2234 u64 branch_from, 2235 bool callee, int end, 2236 bool symbols) 2237 { 2238 struct ip_callchain *chain = sample->callchain; 2239 u8 cpumode = PERF_RECORD_MISC_USER; 2240 int err, i; 2241 2242 if (callee) { 2243 for (i = 0; i < end + 1; i++) { 2244 err = add_callchain_ip(thread, cursor, parent, 2245 root_al, &cpumode, chain->ips[i], 2246 false, NULL, NULL, branch_from, 2247 symbols); 2248 if (err) 2249 return err; 2250 } 2251 return 0; 2252 } 2253 2254 for (i = end; i >= 0; i--) { 2255 err = add_callchain_ip(thread, cursor, parent, 2256 root_al, &cpumode, chain->ips[i], 2257 false, NULL, NULL, branch_from, 2258 symbols); 2259 if (err) 2260 return err; 2261 } 2262 2263 return 0; 2264 } 2265 2266 static void save_lbr_cursor_node(struct thread *thread, 2267 struct callchain_cursor *cursor, 2268 int idx) 2269 { 2270 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2271 2272 if (!lbr_stitch) 2273 return; 2274 2275 if (cursor->pos == cursor->nr) { 2276 lbr_stitch->prev_lbr_cursor[idx].valid = false; 2277 return; 2278 } 2279 2280 if (!cursor->curr) 2281 cursor->curr = cursor->first; 2282 else 2283 cursor->curr = cursor->curr->next; 2284 2285 map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms); 2286 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr, 2287 sizeof(struct callchain_cursor_node)); 2288 lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps); 2289 lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map); 2290 2291 lbr_stitch->prev_lbr_cursor[idx].valid = true; 2292 cursor->pos++; 2293 } 2294 2295 static int lbr_callchain_add_lbr_ip(struct thread *thread, 2296 struct callchain_cursor *cursor, 2297 struct perf_sample *sample, 2298 struct symbol **parent, 2299 struct addr_location *root_al, 2300 u64 *branch_from, 2301 bool callee, 2302 bool symbols) 2303 { 2304 struct branch_stack *lbr_stack = sample->branch_stack; 2305 struct branch_entry *entries = perf_sample__branch_entries(sample); 2306 u8 cpumode = PERF_RECORD_MISC_USER; 2307 int lbr_nr = lbr_stack->nr; 2308 struct branch_flags *flags; 2309 int err, i; 2310 u64 ip; 2311 2312 /* 2313 * The curr and pos are not used in writing session. They are cleared 2314 * in callchain_cursor_commit() when the writing session is closed. 
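	 * When stitching is enabled they are reused below so that
	 * save_lbr_cursor_node() can snapshot each newly appended LBR entry
	 * for stitching with the next sample.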
	 * Use curr and pos to track the current cursor node.
	 */
	if (thread__lbr_stitch(thread)) {
		cursor->curr = NULL;
		cursor->pos = cursor->nr;
		if (cursor->nr) {
			cursor->curr = cursor->first;
			for (i = 0; i < (int)(cursor->nr - 1); i++)
				cursor->curr = cursor->curr->next;
		}
	}

	if (callee) {
		/* Add LBR ip from first entries.to */
		ip = entries[0].to;
		flags = &entries[0].flags;
		*branch_from = entries[0].from;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from, symbols);
		if (err)
			return err;

		/*
		 * The number of cursor nodes has increased, so move the
		 * current cursor node forward.  The cursor node for entry 0
		 * does not need to be saved: it is impossible to stitch the
		 * whole LBR stack of the previous sample onto it.
		 */
		if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
			if (!cursor->curr)
				cursor->curr = cursor->first;
			else
				cursor->curr = cursor->curr->next;
			cursor->pos++;
		}

		/* Add LBR ip from entries.from one by one. */
		for (i = 0; i < lbr_nr; i++) {
			ip = entries[i].from;
			flags = &entries[i].flags;
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       true, flags, NULL,
					       *branch_from, symbols);
			if (err)
				return err;
			save_lbr_cursor_node(thread, cursor, i);
		}
		return 0;
	}

	/* Add LBR ip from entries.from one by one. */
	for (i = lbr_nr - 1; i >= 0; i--) {
		ip = entries[i].from;
		flags = &entries[i].flags;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from, symbols);
		if (err)
			return err;
		save_lbr_cursor_node(thread, cursor, i);
	}

	if (lbr_nr > 0) {
		/* Add LBR ip from first entries.to */
		ip = entries[0].to;
		flags = &entries[0].flags;
		*branch_from = entries[0].from;
		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       true, flags, NULL,
				       *branch_from, symbols);
		if (err)
			return err;
	}

	return 0;
}

static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
					     struct callchain_cursor *cursor)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct callchain_cursor_node *cnode;
	struct stitch_list *stitch_node;
	int err;

	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
		cnode = &stitch_node->cursor;

		err = callchain_cursor_append(cursor, cnode->ip,
					      &cnode->ms,
					      cnode->branch,
					      &cnode->branch_flags,
					      cnode->nr_loop_iter,
					      cnode->iter_cycles,
					      cnode->branch_from,
					      cnode->srcline);
		if (err)
			return err;
	}
	return 0;
}

static struct stitch_list *get_stitch_node(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *stitch_node;

	if (!list_empty(&lbr_stitch->free_lists)) {
		stitch_node = list_first_entry(&lbr_stitch->free_lists,
					       struct stitch_list, node);
		list_del(&stitch_node->node);

		return stitch_node;
	}

	return malloc(sizeof(struct stitch_list));
}

static bool has_stitched_lbr(struct thread *thread,
			     struct perf_sample *cur,
			     struct perf_sample *prev,
			     unsigned int max_lbr,
			     bool callee)
{
	struct 
branch_stack *cur_stack = cur->branch_stack; 2445 struct branch_entry *cur_entries = perf_sample__branch_entries(cur); 2446 struct branch_stack *prev_stack = prev->branch_stack; 2447 struct branch_entry *prev_entries = perf_sample__branch_entries(prev); 2448 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2449 int i, j, nr_identical_branches = 0; 2450 struct stitch_list *stitch_node; 2451 u64 cur_base, distance; 2452 2453 if (!cur_stack || !prev_stack) 2454 return false; 2455 2456 /* Find the physical index of the base-of-stack for current sample. */ 2457 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1; 2458 2459 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) : 2460 (max_lbr + prev_stack->hw_idx - cur_base); 2461 /* Previous sample has shorter stack. Nothing can be stitched. */ 2462 if (distance + 1 > prev_stack->nr) 2463 return false; 2464 2465 /* 2466 * Check if there are identical LBRs between two samples. 2467 * Identical LBRs must have same from, to and flags values. Also, 2468 * they have to be saved in the same LBR registers (same physical 2469 * index). 2470 * 2471 * Starts from the base-of-stack of current sample. 2472 */ 2473 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) { 2474 if ((prev_entries[i].from != cur_entries[j].from) || 2475 (prev_entries[i].to != cur_entries[j].to) || 2476 (prev_entries[i].flags.value != cur_entries[j].flags.value)) 2477 break; 2478 nr_identical_branches++; 2479 } 2480 2481 if (!nr_identical_branches) 2482 return false; 2483 2484 /* 2485 * Save the LBRs between the base-of-stack of previous sample 2486 * and the base-of-stack of current sample into lbr_stitch->lists. 2487 * These LBRs will be stitched later. 2488 */ 2489 for (i = prev_stack->nr - 1; i > (int)distance; i--) { 2490 2491 if (!lbr_stitch->prev_lbr_cursor[i].valid) 2492 continue; 2493 2494 stitch_node = get_stitch_node(thread); 2495 if (!stitch_node) 2496 return false; 2497 2498 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i], 2499 sizeof(struct callchain_cursor_node)); 2500 2501 stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps); 2502 stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map); 2503 2504 if (callee) 2505 list_add(&stitch_node->node, &lbr_stitch->lists); 2506 else 2507 list_add_tail(&stitch_node->node, &lbr_stitch->lists); 2508 } 2509 2510 return true; 2511 } 2512 2513 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr) 2514 { 2515 if (thread__lbr_stitch(thread)) 2516 return true; 2517 2518 thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch))); 2519 if (!thread__lbr_stitch(thread)) 2520 goto err; 2521 2522 thread__lbr_stitch(thread)->prev_lbr_cursor = 2523 calloc(max_lbr + 1, sizeof(struct callchain_cursor_node)); 2524 if (!thread__lbr_stitch(thread)->prev_lbr_cursor) 2525 goto free_lbr_stitch; 2526 2527 thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1; 2528 2529 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists); 2530 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists); 2531 2532 return true; 2533 2534 free_lbr_stitch: 2535 free(thread__lbr_stitch(thread)); 2536 thread__set_lbr_stitch(thread, NULL); 2537 err: 2538 pr_warning("Failed to allocate space for stitched LBRs. 
Disable LBR stitch\n");
	thread__set_lbr_stitch_enable(thread, false);
	return false;
}

/*
 * Resolve the LBR call stack for a callchain sample.
 * Return:
 *   1 - LBR callchain information was resolved successfully,
 *   0 - no LBR callchain information is available, the caller should fall
 *       back to the frame pointer callchain,
 *   negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack,
					unsigned int max_lbr,
					bool symbols)
{
	bool callee = (callchain_param.order == ORDER_CALLEE);
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	struct lbr_stitch *lbr_stitch;
	bool stitched_lbr = false;
	u64 branch_from = 0;
	int err;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i == chain_nr)
		return 0;

	if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
		lbr_stitch = thread__lbr_stitch(thread);

		stitched_lbr = has_stitched_lbr(thread, sample,
						&lbr_stitch->prev_sample,
						max_lbr, callee);

		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
			struct stitch_list *stitch_node;

			list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
				map_symbol__exit(&stitch_node->cursor.ms);

			list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
		}
		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
	}

	if (callee) {
		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  true, i, symbols);
		if (err)
			goto error;

		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, true, symbols);
		if (err)
			goto error;

		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}

	} else {
		if (stitched_lbr) {
			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
			if (err)
				goto error;
		}
		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
					       root_al, &branch_from, false, symbols);
		if (err)
			goto error;

		/* Add kernel ip */
		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
						  parent, root_al, branch_from,
						  false, i, symbols);
		if (err)
			goto error;
	}
	return 1;

error:
	return (err < 0) ?
err : 0; 2636 } 2637 2638 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread, 2639 struct callchain_cursor *cursor, 2640 struct symbol **parent, 2641 struct addr_location *root_al, 2642 u8 *cpumode, int ent, bool symbols) 2643 { 2644 int err = 0; 2645 2646 while (--ent >= 0) { 2647 u64 ip = chain->ips[ent]; 2648 2649 if (ip >= PERF_CONTEXT_MAX) { 2650 err = add_callchain_ip(thread, cursor, parent, 2651 root_al, cpumode, ip, 2652 false, NULL, NULL, 0, symbols); 2653 break; 2654 } 2655 } 2656 return err; 2657 } 2658 2659 static u64 get_leaf_frame_caller(struct perf_sample *sample, 2660 struct thread *thread, int usr_idx) 2661 { 2662 if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64")) 2663 return get_leaf_frame_caller_aarch64(sample, thread, usr_idx); 2664 else 2665 return 0; 2666 } 2667 2668 static int thread__resolve_callchain_sample(struct thread *thread, 2669 struct callchain_cursor *cursor, 2670 struct evsel *evsel, 2671 struct perf_sample *sample, 2672 struct symbol **parent, 2673 struct addr_location *root_al, 2674 int max_stack, 2675 bool symbols) 2676 { 2677 struct branch_stack *branch = sample->branch_stack; 2678 struct branch_entry *entries = perf_sample__branch_entries(sample); 2679 struct ip_callchain *chain = sample->callchain; 2680 int chain_nr = 0; 2681 u8 cpumode = PERF_RECORD_MISC_USER; 2682 int i, j, err, nr_entries, usr_idx; 2683 int skip_idx = -1; 2684 int first_call = 0; 2685 u64 leaf_frame_caller; 2686 2687 if (chain) 2688 chain_nr = chain->nr; 2689 2690 if (evsel__has_branch_callstack(evsel)) { 2691 struct perf_env *env = evsel__env(evsel); 2692 2693 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent, 2694 root_al, max_stack, 2695 !env ? 0 : env->max_branches, 2696 symbols); 2697 if (err) 2698 return (err < 0) ? err : 0; 2699 } 2700 2701 /* 2702 * Based on DWARF debug information, some architectures skip 2703 * a callchain entry saved by the kernel. 2704 */ 2705 skip_idx = arch_skip_callchain_idx(thread, chain); 2706 2707 /* 2708 * Add branches to call stack for easier browsing. This gives 2709 * more context for a sample than just the callers. 2710 * 2711 * This uses individual histograms of paths compared to the 2712 * aggregated histograms the normal LBR mode uses. 2713 * 2714 * Limitations for now: 2715 * - No extra filters 2716 * - No annotations (should annotate somehow) 2717 */ 2718 2719 if (branch && callchain_param.branch_callstack) { 2720 int nr = min(max_stack, (int)branch->nr); 2721 struct branch_entry be[nr]; 2722 struct iterations iter[nr]; 2723 2724 if (branch->nr > PERF_MAX_BRANCH_DEPTH) { 2725 pr_warning("corrupted branch chain. skipping...\n"); 2726 goto check_calls; 2727 } 2728 2729 for (i = 0; i < nr; i++) { 2730 if (callchain_param.order == ORDER_CALLEE) { 2731 be[i] = entries[i]; 2732 2733 if (chain == NULL) 2734 continue; 2735 2736 /* 2737 * Check for overlap into the callchain. 2738 * The return address is one off compared to 2739 * the branch entry. To adjust for this 2740 * assume the calling instruction is not longer 2741 * than 8 bytes. 
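				 * E.g. a 5-byte call at 0x1000 records an LBR
				 * 'from' of 0x1000 while the kernel callchain
				 * holds the return address 0x1005; since 0x1000
				 * lies within 8 bytes below 0x1005, the
				 * duplicated callchain entry is skipped by
				 * bumping first_call.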
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
				    be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from, symbols);

			if (!err) {
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0, symbols);
			}
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (chain && callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call, symbols);
		if (err)
			return (err < 0) ? err : 0;
	}
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j, symbols);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		/*
		 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
		 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
		 * the index will be different in order to add the missing frame
		 * at the right place.
		 */

		usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;

		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {

			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);

			/*
			 * Check that leaf_frame_caller != ip so the same
			 * value is not added twice.
			 */

			if (leaf_frame_caller && leaf_frame_caller != ip) {

				err = add_callchain_ip(thread, cursor, parent,
						       root_al, &cpumode, leaf_frame_caller,
						       false, NULL, NULL, 0, symbols);
				if (err)
					return (err < 0) ? err : 0;
			}
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0, symbols);

		if (err)
			return (err < 0) ?
err : 0; 2846 } 2847 2848 return 0; 2849 } 2850 2851 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip) 2852 { 2853 struct symbol *sym = ms->sym; 2854 struct map *map = ms->map; 2855 struct inline_node *inline_node; 2856 struct inline_list *ilist; 2857 struct dso *dso; 2858 u64 addr; 2859 int ret = 1; 2860 struct map_symbol ilist_ms; 2861 2862 if (!symbol_conf.inline_name || !map || !sym) 2863 return ret; 2864 2865 addr = map__dso_map_ip(map, ip); 2866 addr = map__rip_2objdump(map, addr); 2867 dso = map__dso(map); 2868 2869 inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr); 2870 if (!inline_node) { 2871 inline_node = dso__parse_addr_inlines(dso, addr, sym); 2872 if (!inline_node) 2873 return ret; 2874 inlines__tree_insert(dso__inlined_nodes(dso), inline_node); 2875 } 2876 2877 ilist_ms = (struct map_symbol) { 2878 .maps = maps__get(ms->maps), 2879 .map = map__get(map), 2880 }; 2881 list_for_each_entry(ilist, &inline_node->val, list) { 2882 ilist_ms.sym = ilist->symbol; 2883 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false, 2884 NULL, 0, 0, 0, ilist->srcline); 2885 2886 if (ret != 0) 2887 return ret; 2888 } 2889 map_symbol__exit(&ilist_ms); 2890 2891 return ret; 2892 } 2893 2894 static int unwind_entry(struct unwind_entry *entry, void *arg) 2895 { 2896 struct callchain_cursor *cursor = arg; 2897 const char *srcline = NULL; 2898 u64 addr = entry->ip; 2899 2900 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL) 2901 return 0; 2902 2903 if (append_inlines(cursor, &entry->ms, entry->ip) == 0) 2904 return 0; 2905 2906 /* 2907 * Convert entry->ip from a virtual address to an offset in 2908 * its corresponding binary. 2909 */ 2910 if (entry->ms.map) 2911 addr = map__dso_map_ip(entry->ms.map, entry->ip); 2912 2913 srcline = callchain_srcline(&entry->ms, addr); 2914 return callchain_cursor_append(cursor, entry->ip, &entry->ms, 2915 false, NULL, 0, 0, 0, srcline); 2916 } 2917 2918 static int thread__resolve_callchain_unwind(struct thread *thread, 2919 struct callchain_cursor *cursor, 2920 struct evsel *evsel, 2921 struct perf_sample *sample, 2922 int max_stack, bool symbols) 2923 { 2924 /* Can we do dwarf post unwind? */ 2925 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) && 2926 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER))) 2927 return 0; 2928 2929 /* Bail out if nothing was captured. 
*/ 2930 if ((!sample->user_regs.regs) || 2931 (!sample->user_stack.size)) 2932 return 0; 2933 2934 if (!symbols) 2935 pr_debug("Not resolving symbols with an unwinder isn't currently supported\n"); 2936 2937 return unwind__get_entries(unwind_entry, cursor, 2938 thread, sample, max_stack, false); 2939 } 2940 2941 int __thread__resolve_callchain(struct thread *thread, 2942 struct callchain_cursor *cursor, 2943 struct evsel *evsel, 2944 struct perf_sample *sample, 2945 struct symbol **parent, 2946 struct addr_location *root_al, 2947 int max_stack, 2948 bool symbols) 2949 { 2950 int ret = 0; 2951 2952 if (cursor == NULL) 2953 return -ENOMEM; 2954 2955 callchain_cursor_reset(cursor); 2956 2957 if (callchain_param.order == ORDER_CALLEE) { 2958 ret = thread__resolve_callchain_sample(thread, cursor, 2959 evsel, sample, 2960 parent, root_al, 2961 max_stack, symbols); 2962 if (ret) 2963 return ret; 2964 ret = thread__resolve_callchain_unwind(thread, cursor, 2965 evsel, sample, 2966 max_stack, symbols); 2967 } else { 2968 ret = thread__resolve_callchain_unwind(thread, cursor, 2969 evsel, sample, 2970 max_stack, symbols); 2971 if (ret) 2972 return ret; 2973 ret = thread__resolve_callchain_sample(thread, cursor, 2974 evsel, sample, 2975 parent, root_al, 2976 max_stack, symbols); 2977 } 2978 2979 return ret; 2980 } 2981 2982 int machine__for_each_thread(struct machine *machine, 2983 int (*fn)(struct thread *thread, void *p), 2984 void *priv) 2985 { 2986 return threads__for_each_thread(&machine->threads, fn, priv); 2987 } 2988 2989 int machines__for_each_thread(struct machines *machines, 2990 int (*fn)(struct thread *thread, void *p), 2991 void *priv) 2992 { 2993 struct rb_node *nd; 2994 int rc = 0; 2995 2996 rc = machine__for_each_thread(&machines->host, fn, priv); 2997 if (rc != 0) 2998 return rc; 2999 3000 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 3001 struct machine *machine = rb_entry(nd, struct machine, rb_node); 3002 3003 rc = machine__for_each_thread(machine, fn, priv); 3004 if (rc != 0) 3005 return rc; 3006 } 3007 return rc; 3008 } 3009 3010 3011 static int thread_list_cb(struct thread *thread, void *data) 3012 { 3013 struct list_head *list = data; 3014 struct thread_list *entry = malloc(sizeof(*entry)); 3015 3016 if (!entry) 3017 return -ENOMEM; 3018 3019 entry->thread = thread__get(thread); 3020 list_add_tail(&entry->list, list); 3021 return 0; 3022 } 3023 3024 int machine__thread_list(struct machine *machine, struct list_head *list) 3025 { 3026 return machine__for_each_thread(machine, thread_list_cb, list); 3027 } 3028 3029 void thread_list__delete(struct list_head *list) 3030 { 3031 struct thread_list *pos, *next; 3032 3033 list_for_each_entry_safe(pos, next, list, list) { 3034 thread__zput(pos->thread); 3035 list_del(&pos->list); 3036 free(pos); 3037 } 3038 } 3039 3040 pid_t machine__get_current_tid(struct machine *machine, int cpu) 3041 { 3042 if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz) 3043 return -1; 3044 3045 return machine->current_tid[cpu]; 3046 } 3047 3048 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, 3049 pid_t tid) 3050 { 3051 struct thread *thread; 3052 const pid_t init_val = -1; 3053 3054 if (cpu < 0) 3055 return -EINVAL; 3056 3057 if (realloc_array_as_needed(machine->current_tid, 3058 machine->current_tid_sz, 3059 (unsigned int)cpu, 3060 &init_val)) 3061 return -ENOMEM; 3062 3063 machine->current_tid[cpu] = tid; 3064 3065 thread = machine__findnew_thread(machine, pid, tid); 3066 if (!thread) 3067 return -ENOMEM; 
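	/*
	 * Record the CPU this thread was last seen running on and drop the
	 * reference taken by machine__findnew_thread().
	 */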
3068 3069 thread__set_cpu(thread, cpu); 3070 thread__put(thread); 3071 3072 return 0; 3073 } 3074 3075 /* 3076 * Compares the raw arch string. N.B. see instead perf_env__arch() or 3077 * machine__normalized_is() if a normalized arch is needed. 3078 */ 3079 bool machine__is(struct machine *machine, const char *arch) 3080 { 3081 return machine && !strcmp(perf_env__raw_arch(machine->env), arch); 3082 } 3083 3084 bool machine__normalized_is(struct machine *machine, const char *arch) 3085 { 3086 return machine && !strcmp(perf_env__arch(machine->env), arch); 3087 } 3088 3089 int machine__nr_cpus_avail(struct machine *machine) 3090 { 3091 return machine ? perf_env__nr_cpus_avail(machine->env) : 0; 3092 } 3093 3094 int machine__get_kernel_start(struct machine *machine) 3095 { 3096 struct map *map = machine__kernel_map(machine); 3097 int err = 0; 3098 3099 /* 3100 * The only addresses above 2^63 are kernel addresses of a 64-bit 3101 * kernel. Note that addresses are unsigned so that on a 32-bit system 3102 * all addresses including kernel addresses are less than 2^32. In 3103 * that case (32-bit system), if the kernel mapping is unknown, all 3104 * addresses will be assumed to be in user space - see 3105 * machine__kernel_ip(). 3106 */ 3107 machine->kernel_start = 1ULL << 63; 3108 if (map) { 3109 err = map__load(map); 3110 /* 3111 * On x86_64, PTI entry trampolines are less than the 3112 * start of kernel text, but still above 2^63. So leave 3113 * kernel_start = 1ULL << 63 for x86_64. 3114 */ 3115 if (!err && !machine__is(machine, "x86_64")) 3116 machine->kernel_start = map__start(map); 3117 } 3118 return err; 3119 } 3120 3121 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr) 3122 { 3123 u8 addr_cpumode = cpumode; 3124 bool kernel_ip; 3125 3126 if (!machine->single_address_space) 3127 goto out; 3128 3129 kernel_ip = machine__kernel_ip(machine, addr); 3130 switch (cpumode) { 3131 case PERF_RECORD_MISC_KERNEL: 3132 case PERF_RECORD_MISC_USER: 3133 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL : 3134 PERF_RECORD_MISC_USER; 3135 break; 3136 case PERF_RECORD_MISC_GUEST_KERNEL: 3137 case PERF_RECORD_MISC_GUEST_USER: 3138 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL : 3139 PERF_RECORD_MISC_GUEST_USER; 3140 break; 3141 default: 3142 break; 3143 } 3144 out: 3145 return addr_cpumode; 3146 } 3147 3148 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, 3149 const struct dso_id *id) 3150 { 3151 return dsos__findnew_id(&machine->dsos, filename, id); 3152 } 3153 3154 struct dso *machine__findnew_dso(struct machine *machine, const char *filename) 3155 { 3156 return machine__findnew_dso_id(machine, filename, NULL); 3157 } 3158 3159 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 3160 { 3161 struct machine *machine = vmachine; 3162 struct map *map; 3163 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map); 3164 3165 if (sym == NULL) 3166 return NULL; 3167 3168 *modp = __map__is_kmodule(map) ? 
(char *)dso__short_name(map__dso(map)) : NULL; 3169 *addrp = map__unmap_ip(map, sym->start); 3170 return sym->name; 3171 } 3172 3173 struct machine__for_each_dso_cb_args { 3174 struct machine *machine; 3175 machine__dso_t fn; 3176 void *priv; 3177 }; 3178 3179 static int machine__for_each_dso_cb(struct dso *dso, void *data) 3180 { 3181 struct machine__for_each_dso_cb_args *args = data; 3182 3183 return args->fn(dso, args->machine, args->priv); 3184 } 3185 3186 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv) 3187 { 3188 struct machine__for_each_dso_cb_args args = { 3189 .machine = machine, 3190 .fn = fn, 3191 .priv = priv, 3192 }; 3193 3194 return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args); 3195 } 3196 3197 int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv) 3198 { 3199 struct maps *maps = machine__kernel_maps(machine); 3200 3201 return maps__for_each_map(maps, fn, priv); 3202 } 3203 3204 bool machine__is_lock_function(struct machine *machine, u64 addr) 3205 { 3206 if (!machine->sched.text_start) { 3207 struct map *kmap; 3208 struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap); 3209 3210 if (!sym) { 3211 /* to avoid retry */ 3212 machine->sched.text_start = 1; 3213 return false; 3214 } 3215 3216 machine->sched.text_start = map__unmap_ip(kmap, sym->start); 3217 3218 /* should not fail from here */ 3219 sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap); 3220 machine->sched.text_end = map__unmap_ip(kmap, sym->start); 3221 3222 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap); 3223 machine->lock.text_start = map__unmap_ip(kmap, sym->start); 3224 3225 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap); 3226 machine->lock.text_end = map__unmap_ip(kmap, sym->start); 3227 3228 sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap); 3229 if (sym) { 3230 machine->traceiter.text_start = map__unmap_ip(kmap, sym->start); 3231 machine->traceiter.text_end = map__unmap_ip(kmap, sym->end); 3232 } 3233 sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap); 3234 if (sym) { 3235 machine->trace.text_start = map__unmap_ip(kmap, sym->start); 3236 machine->trace.text_end = map__unmap_ip(kmap, sym->end); 3237 } 3238 } 3239 3240 /* failed to get kernel symbols */ 3241 if (machine->sched.text_start == 1) 3242 return false; 3243 3244 /* mutex and rwsem functions are in sched text section */ 3245 if (machine->sched.text_start <= addr && addr < machine->sched.text_end) 3246 return true; 3247 3248 /* spinlock functions are in lock text section */ 3249 if (machine->lock.text_start <= addr && addr < machine->lock.text_end) 3250 return true; 3251 3252 /* traceiter functions currently don't have their own section 3253 * but we consider them lock functions 3254 */ 3255 if (machine->traceiter.text_start != 0) { 3256 if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end) 3257 return true; 3258 } 3259 3260 if (machine->trace.text_start != 0) { 3261 if (machine->trace.text_start <= addr && addr < machine->trace.text_end) 3262 return true; 3263 } 3264 3265 return false; 3266 } 3267 3268 int machine__hit_all_dsos(struct machine *machine) 3269 { 3270 return dsos__hit_all(&machine->dsos); 3271 } 3272
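
/*
 * Usage sketch: counting the threads of a machine with
 * machine__for_each_thread().  The callback name and its counter argument
 * are illustrative only, not part of this file.  Returning non-zero from
 * the callback stops the iteration.
 *
 *	static int count_thread_cb(struct thread *thread __maybe_unused, void *data)
 *	{
 *		int *nr_threads = data;
 *
 *		(*nr_threads)++;
 *		return 0;
 *	}
 *
 *	int nr_threads = 0;
 *
 *	machine__for_each_thread(machine, count_thread_cb, &nr_threads);
 */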