Lines in tools/perf/util/machine.c matching +full:0 +full:x2c000 (i.e. the literal constant 0x2c000)
61 machine->pid) < 0) in machine__set_mmap_name()
64 return machine->mmap_name ? 0 : -ENOMEM; in machine__set_mmap_name()
72 thread__set_comm(thread, comm, 0); in thread__set_guest_comm()
79 memset(machine, 0, sizeof(*machine)); in machine__init()
94 machine->id_hdr_size = 0; in machine__init()
97 machine->kernel_start = 0; in machine__init()
121 err = 0; in machine__init()
129 return 0; in machine__init()
141 if (kernel_maps && machine__create_kernel_maps(machine) < 0) { in __machine__new_host()
166 memset(&event, 0, sizeof(event)); in machine__init_live()
194 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) { in machine__new_kallsyms()
255 if (machine__init(machine, root_dir, pid) != 0) { in machines__add()
575 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
583 struct thread *thread = machine__findnew_thread(machine, 0, 0); in machine__idle_thread()
585 if (!thread || thread__set_comm(thread, "swapper", 0) || in machine__idle_thread()
586 thread__set_namespaces(thread, 0, NULL)) in machine__idle_thread()
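The pid == tid == 0 convention documented at 575 lets the idle ("swapper") task be created on demand. Stitching the fragments at 583-586 back together gives roughly the following lookup (a best-effort reconstruction, not guaranteed verbatim):

    /* Fetch or create the idle task entry for this machine and label it
     * "swapper"; any failure is only reported, the thread is still returned. */
    struct thread *machine__idle_thread(struct machine *machine)
    {
            struct thread *thread = machine__findnew_thread(machine, 0, 0);

            if (!thread || thread__set_comm(thread, "swapper", 0) ||
                thread__set_namespaces(thread, 0, NULL))
                    pr_err("problem inserting idle task for machine pid %d\n",
                           machine->pid);

            return thread;
    }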
608 int err = 0; in machine__process_comm_event()
634 int err = 0; in machine__process_namespaces_event()
671 return 0; in machine__process_cgroup_event()
679 return 0; in machine__process_lost_event()
688 return 0; in machine__process_lost_samples_event()
696 return 0; in machine__process_aux_event()
704 return 0; in machine__process_itrace_start_event()
712 return 0; in machine__process_aux_output_hw_id_event()
723 return 0; in machine__process_switch_event()
733 int err = 0; in machine__process_ksymbol_register()
743 map = map__new2(0, dso); in machine__process_ksymbol_register()
774 0, 0, event->ksymbol.name); in machine__process_ksymbol_register()
795 return 0; in machine__process_ksymbol_unregister()
807 return 0; in machine__process_ksymbol_unregister()
818 if (event->ksymbol.len == 0) in machine__process_ksymbol()
819 return 0; in machine__process_ksymbol()
867 return 0; in machine__process_text_poke()
945 return 0; in machine_fprintf_cb()
952 .printed = 0, in machine__fprintf()
1008 u64 addr = 0; in machine__get_running_kernel_start()
1013 return 0; in machine__get_running_kernel_start()
1015 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { in machine__get_running_kernel_start()
1035 return 0; in machine__get_running_kernel_start()
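The loop at 1015 walks a NULL-terminated list of candidate reference-relocation symbols until one resolves. A minimal sketch of that pattern; the "_text"/"_stext" entries are an assumption about ref_reloc_sym_names[], not something shown in this listing:

    /* Assumed candidate list; the walk stops at the NULL terminator. */
    static const char *const ref_reloc_sym_names[] = { "_text", "_stext", NULL };

    for (int i = 0; ref_reloc_sym_names[i] != NULL; i++) {
            /* try each name until it resolves to a kernel symbol, then use
             * that symbol's address as the running kernel start */
    }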
1083 for (i = 0; i < ARRAY_SIZE(syms); i++) { in find_entry_trampoline()
1089 return 0; in find_entry_trampoline()
1096 #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1097 #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1098 #define X86_64_ENTRY_TRAMPOLINE 0x6000
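These three constants describe the x86_64 cpu_entry_area layout that PTI exposes. A standalone sketch (not perf code) of how they are typically combined into a per-CPU entry-trampoline address; the one-slot-per-CPU stride is an assumption drawn from the nr_cpus_avail loop at 1150 below:

    #include <stdio.h>
    #include <stdint.h>

    #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
    #define X86_64_CPU_ENTRY_AREA_SIZE    0x2c000
    #define X86_64_ENTRY_TRAMPOLINE       0x6000

    /* One fixed-size cpu_entry_area slot per CPU; the entry trampoline sits
     * at a fixed offset inside each slot. */
    static uint64_t entry_trampoline_addr(int cpu)
    {
            return X86_64_CPU_ENTRY_AREA_PER_CPU +
                   (uint64_t)cpu * X86_64_CPU_ENTRY_AREA_SIZE +
                   X86_64_ENTRY_TRAMPOLINE;
    }

    int main(void)
    {
            for (int cpu = 0; cpu < 4; cpu++)
                    printf("cpu %d trampoline: %#llx\n", cpu,
                           (unsigned long long)entry_trampoline_addr(cpu));
            return 0;
    }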
1112 return 0; in machine__map_x86_64_entry_trampolines_cb()
1120 return 0; in machine__map_x86_64_entry_trampolines_cb()
1141 return 0; in machine__map_x86_64_entry_trampolines()
1145 return 0; in machine__map_x86_64_entry_trampolines()
1150 for (cpu = 0; cpu < nr_cpus_avail; cpu++) { in machine__map_x86_64_entry_trampolines()
1162 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0) in machine__map_x86_64_entry_trampolines()
1168 return 0; in machine__map_x86_64_entry_trampolines()
1174 return 0; in machine__create_extra_kernel_maps()
1184 machine->vmlinux_map = map__new2(0, kernel); in __machine__create_kernel_maps()
1212 int ret = 0; in machines__create_guest_kernel_maps()
1214 int i, items = 0; in machines__create_guest_kernel_maps()
1227 if (items <= 0) in machines__create_guest_kernel_maps()
1229 for (i = 0; i < items; i++) { in machines__create_guest_kernel_maps()
1230 if (!isdigit(namelist[i]->d_name[0])) { in machines__create_guest_kernel_maps()
1235 if ((*endp != '\0') || in machines__create_guest_kernel_maps()
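The isdigit()/strtol()/endp checks at 1230-1235 are a common way to treat directory entries as pids while skipping ".", ".." and anything malformed. A generic, self-contained illustration of that validation pattern; the directory scanned here is only an example, not necessarily what machines__create_guest_kernel_maps() uses:

    #include <ctype.h>
    #include <dirent.h>
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Print the numeric (pid-like) entries of a directory, skipping anything
     * that is not a well-formed decimal name. */
    static void list_pid_entries(const char *dir)
    {
            struct dirent **namelist = NULL;
            int items = scandir(dir, &namelist, NULL, NULL);

            if (items <= 0)
                    return;
            for (int i = 0; i < items; i++) {
                    char *endp;
                    long pid;

                    if (!isdigit((unsigned char)namelist[i]->d_name[0]))
                            continue;       /* also filters out "." and ".." */
                    errno = 0;
                    pid = strtol(namelist[i]->d_name, &endp, 10);
                    if (*endp != '\0' || endp == namelist[i]->d_name ||
                        errno == ERANGE)
                            continue;       /* not a clean decimal entry */
                    printf("pid-like entry: %ld\n", pid);
            }
            for (int i = 0; i < items; i++)
                    free(namelist[i]);
            free(namelist);
    }

    int main(void)
    {
            list_pid_entries("/proc");      /* example directory only */
            return 0;
    }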
1290 if (ret > 0) { in machine__load_kallsyms()
1309 if (ret > 0) in machine__load_vmlinux_path()
1338 *tmp = '\0'; in get_kernel_version()
1356 return 0; in maps__set_module_path()
1377 return 0; in maps__set_module_path()
1385 int ret = 0; in maps__set_modules_path_dir()
1388 if (iod.dirfd < 0) { in maps__set_modules_path_dir()
1403 if (depth == 0) { in maps__set_modules_path_dir()
1415 if (ret < 0) in maps__set_modules_path_dir()
1458 modules_path, sizeof(modules_path), 0); in machine__set_modules_path()
1464 return 0; in arch__fix_module_text_start()
1473 if (arch__fix_module_text_start(&start, &size, name) < 0) in machine__create_module()
1483 return 0; in machine__create_module()
1505 return 0; in machine__create_modules()
1509 return 0; in machine__create_modules()
1521 if (start == 0 && end == 0) in machine__set_kernel_mmap()
1522 map__set_end(machine->vmlinux_map, ~0ULL); in machine__set_kernel_mmap()
1547 u64 start = 0, end = ~0ULL; in machine__create_kernel_maps()
1554 if (ret < 0) in machine__create_kernel_maps()
1557 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { in machine__create_kernel_maps()
1579 if (ret < 0) in machine__create_kernel_maps()
1586 if (end == ~0ULL) { in machine__create_kernel_maps()
1606 return dso__is_kcore(dso) ? 1 : 0; in machine__uses_kcore_cb()
1611 return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? true : false; in machine__uses_kcore()
1642 return 0; in machine__process_kernel_mmap_event()
1649 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; in machine__process_kernel_mmap_event()
1657 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; in machine__process_kernel_mmap_event()
1659 if (xm->name[0] == '/' || in machine__process_kernel_mmap_event()
1660 (!is_kernel_mmap && xm->name[0] == '[')) { in machine__process_kernel_mmap_event()
1686 if (__machine__create_kernel_maps(machine, kernel) < 0) { in machine__process_kernel_mmap_event()
1694 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) { in machine__process_kernel_mmap_event()
1707 if (xm->pgoff != 0) { in machine__process_kernel_mmap_event()
1723 return 0; in machine__process_kernel_mmap_event()
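The memcmp(..., strlen(mmap_name) - 1) tests at 1649 and 1657 are a prefix comparison over all but the last character of mmap_name, so event names carrying a suffix still classify as kernel mmaps. A standalone sketch; the example strings are typical values, assumed rather than taken from this listing:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *mmap_name  = "[kernel.kallsyms]";        /* typical host mmap name */
            const char *event_name = "[kernel.kallsyms]_text";   /* hypothetical event name */
            int is_kernel_mmap = memcmp(event_name, mmap_name,
                                        strlen(mmap_name) - 1) == 0;

            printf("is_kernel_mmap = %d\n", is_kernel_mmap);     /* prints 1 */
            return 0;
    }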
1735 int ret = 0; in machine__process_mmap2_event()
1761 if (ret < 0) in machine__process_mmap2_event()
1763 return 0; in machine__process_mmap2_event()
1786 return 0; in machine__process_mmap2_event()
1794 return 0; in machine__process_mmap2_event()
1802 u32 prot = 0; in machine__process_mmap_event()
1803 int ret = 0; in machine__process_mmap_event()
1818 if (ret < 0) in machine__process_mmap_event()
1820 return 0; in machine__process_mmap_event()
1833 &dso_id_empty, prot, /*flags=*/0, event->mmap.filename, thread); in machine__process_mmap_event()
1844 return 0; in machine__process_mmap_event()
1852 return 0; in machine__process_mmap_event()
1870 int err = 0; in machine__process_fork_event()
1916 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) { in machine__process_fork_event()
1945 return 0; in machine__process_exit_event()
1997 return regexec(regex, sym->name, 0, NULL, 0) == 0; in symbol__match_regex()
2022 ams->phys_addr = 0; in ip__resolve_ams()
2023 ams->data_page_size = 0; in ip__resolve_ams()
2107 int nr_loop_iter = 0, err = 0; in add_callchain_ip()
2108 u64 iter_cycles = 0; in add_callchain_ip()
2112 al.filtered = 0; in add_callchain_ip()
2194 for (i = 0; i < bs->nr; i++) { in sample__resolve_bstack()
2210 iter->cycles = 0; in save_iterations()
2212 for (i = 0; i < nr; i++) in save_iterations()
2218 #define NO_ENTRY 0xff
2233 for (i = 0; i < nr; i++) { in remove_loops()
2242 off = 0; in remove_loops()
2250 if (j > 0) { in remove_loops()
2282 for (i = 0; i < end + 1; i++) { in lbr_callchain_add_kernel_ip()
2290 return 0; in lbr_callchain_add_kernel_ip()
2293 for (i = end; i >= 0; i--) { in lbr_callchain_add_kernel_ip()
2302 return 0; in lbr_callchain_add_kernel_ip()
2361 for (i = 0; i < (int)(cursor->nr - 1); i++) in lbr_callchain_add_lbr_ip()
2368 ip = entries[0].to; in lbr_callchain_add_lbr_ip()
2369 flags = &entries[0].flags; in lbr_callchain_add_lbr_ip()
2370 *branch_from = entries[0].from; in lbr_callchain_add_lbr_ip()
2381 * But does not need to save current cursor node for entry 0. in lbr_callchain_add_lbr_ip()
2393 for (i = 0; i < lbr_nr; i++) { in lbr_callchain_add_lbr_ip()
2404 return 0; in lbr_callchain_add_lbr_ip()
2408 for (i = lbr_nr - 1; i >= 0; i--) { in lbr_callchain_add_lbr_ip()
2420 if (lbr_nr > 0) { in lbr_callchain_add_lbr_ip()
2422 ip = entries[0].to; in lbr_callchain_add_lbr_ip()
2423 flags = &entries[0].flags; in lbr_callchain_add_lbr_ip()
2424 *branch_from = entries[0].from; in lbr_callchain_add_lbr_ip()
2433 return 0; in lbr_callchain_add_lbr_ip()
2458 return 0; in lbr_callchain_add_stitched_lbr_ip()
2488 int i, j, nr_identical_branches = 0; in has_stitched_lbr()
2512 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) { in has_stitched_lbr()
2586 * 0 no available LBR callchain information, should try fp
2603 u64 branch_from = 0; in resolve_lbr_callchain_sample()
2606 for (i = 0; i < chain_nr; i++) { in resolve_lbr_callchain_sample()
2613 return 0; in resolve_lbr_callchain_sample()
2616 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) { in resolve_lbr_callchain_sample()
2674 return (err < 0) ? err : 0; in resolve_lbr_callchain_sample()
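The return-value convention spelled out at 2586 (0 means "no usable LBR data, try frame pointers") is what drives the caller's fallback. A condensed sketch of that call site, reconstructed from the thread__resolve_callchain_sample() lines at 2734-2737 below:

    /* LBR-first, frame-pointer-fallback flow in the caller. */
    err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
                                       root_al, max_stack,
                                       !env ? 0 : env->max_branches,
                                       symbols);
    if (err)
            return (err < 0) ? err : 0;  /* <0: hard error, >0: LBR resolved the chain */
    /* err == 0: no LBR callchain available, fall through to the fp-based walk */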
2683 int err = 0; in find_prev_cpumode()
2685 while (--ent >= 0) { in find_prev_cpumode()
2691 false, NULL, NULL, 0, symbols); in find_prev_cpumode()
2704 return 0; in get_leaf_frame_caller()
2719 int chain_nr = 0; in thread__resolve_callchain_sample()
2723 int first_call = 0; in thread__resolve_callchain_sample()
2734 !env ? 0 : env->max_branches, in thread__resolve_callchain_sample()
2737 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
2768 for (i = 0; i < nr; i++) { in thread__resolve_callchain_sample()
2792 memset(iter, 0, sizeof(struct iterations) * nr); in thread__resolve_callchain_sample()
2795 for (i = 0; i < nr; i++) { in thread__resolve_callchain_sample()
2806 &iter[i], 0, symbols); in thread__resolve_callchain_sample()
2814 if (chain_nr == 0) in thread__resolve_callchain_sample()
2815 return 0; in thread__resolve_callchain_sample()
2825 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
2827 for (i = first_call, nr_entries = 0; in thread__resolve_callchain_sample()
2847 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
2860 if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) { in thread__resolve_callchain_sample()
2873 false, NULL, NULL, 0, symbols); in thread__resolve_callchain_sample()
2875 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
2881 false, NULL, NULL, 0, symbols); in thread__resolve_callchain_sample()
2884 return (err < 0) ? err : 0; in thread__resolve_callchain_sample()
2887 return 0; in thread__resolve_callchain_sample()
2923 NULL, 0, 0, 0, ilist->srcline); in append_inlines()
2925 if (ret != 0) in append_inlines()
2940 return 0; in unwind_entry()
2942 if (append_inlines(cursor, &entry->ms, entry->ip) == 0) in unwind_entry()
2943 return 0; in unwind_entry()
2954 false, NULL, 0, 0, 0, srcline); in unwind_entry()
2966 return 0; in thread__resolve_callchain_unwind()
2971 return 0; in thread__resolve_callchain_unwind()
2989 int ret = 0; in __thread__resolve_callchain()
3033 int rc = 0; in machines__for_each_thread()
3036 if (rc != 0) in machines__for_each_thread()
3043 if (rc != 0) in machines__for_each_thread()
3060 return 0; in thread_list_cb()
3081 if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz) in machine__get_current_tid()
3093 if (cpu < 0) in machine__set_current_tid()
3111 return 0; in machine__set_current_tid()
3130 return machine ? perf_env__nr_cpus_avail(machine->env) : 0; in machine__nr_cpus_avail()
3136 int err = 0; in machine__get_kernel_start()
3294 if (machine->traceiter.text_start != 0) { in machine__is_lock_function()
3299 if (machine->trace.text_start != 0) { in machine__is_lock_function()
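The text_start != 0 tests at 3294 and 3299 follow a cache-then-range-check idiom: a zero start means the symbol range has not been resolved yet. A hypothetical helper (the name is illustrative, not from machine.c) showing the membership test once a range is cached:

    /* Hypothetical: true only when the [text_start, text_end) range has been
     * filled in (non-zero start) and addr falls inside it. */
    static bool addr_in_cached_text_range(u64 addr, u64 text_start, u64 text_end)
    {
            return text_start != 0 && text_start <= addr && addr < text_end;
    }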