Lines Matching +full:min +full:- +full:sample +full:- +full:time +full:- +full:ns

1 // SPDX-License-Identifier: GPL-2.0
32 #include "thread-stack.h"
33 #include "sample-raw.h"
52 struct perf_data *data = session->data; in perf_session__open()
55 pr_err("incompatible file format (rerun with -v to learn more)\n"); in perf_session__open()
56 return -1; in perf_session__open()
59 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) { in perf_session__open()
67 if (perf_header__has_feat(&session->header, HEADER_STAT)) in perf_session__open()
70 if (!evlist__valid_sample_type(session->evlist)) { in perf_session__open()
72 return -1; in perf_session__open()
75 if (!evlist__valid_sample_id_all(session->evlist)) { in perf_session__open()
77 return -1; in perf_session__open()
80 if (!evlist__valid_read_format(session->evlist)) { in perf_session__open()
82 return -1; in perf_session__open()
90 u16 id_hdr_size = evlist__id_hdr_size(session->evlist); in perf_session__set_id_hdr_size()
92 machines__set_id_hdr_size(&session->machines, id_hdr_size); in perf_session__set_id_hdr_size()
97 int ret = machine__create_kernel_maps(&session->machines.host); in perf_session__create_kernel_maps()
100 ret = machines__create_guest_kernel_maps(&session->machines); in perf_session__create_kernel_maps()
106 machines__destroy_kernel_maps(&session->machines); in perf_session__destroy_kernel_maps()
113 evlist__for_each_entry(session->evlist, evsel) { in perf_session__has_comm_exec()
114 if (evsel->core.attr.comm_exec) in perf_session__has_comm_exec()
125 machines__set_comm_exec(&session->machines, comm_exec); in perf_session__set_comm_exec()
134 return perf_session__deliver_event(session, event->event, in ordered_events__deliver_event()
135 session->tool, event->file_offset, in ordered_events__deliver_event()
136 event->file_path); in ordered_events__deliver_event()
144 int ret = -ENOMEM; in __perf_session__new()
150 session->trace_event_repipe = trace_event_repipe; in __perf_session__new()
151 session->tool = tool; in __perf_session__new()
152 session->decomp_data.zstd_decomp = &session->zstd_data; in __perf_session__new()
153 session->active_decomp = &session->decomp_data; in __perf_session__new()
154 INIT_LIST_HEAD(&session->auxtrace_index); in __perf_session__new()
155 machines__init(&session->machines); in __perf_session__new()
156 ordered_events__init(&session->ordered_events, in __perf_session__new()
159 perf_env__init(&session->header.env); in __perf_session__new()
165 session->data = data; in __perf_session__new()
174 * but not in pipe-mode. in __perf_session__new()
176 if (!data->is_pipe) { in __perf_session__new()
181 evlist__init_trace_event_sample_raw(session->evlist, &session->header.env); in __perf_session__new()
184 if (data->is_dir) { in __perf_session__new()
196 session->machines.host.env = host_env; in __perf_session__new()
198 if (session->evlist) in __perf_session__new()
199 session->evlist->session = session; in __perf_session__new()
201 session->machines.host.single_address_space = in __perf_session__new()
202 perf_env__single_address_space(session->machines.host.env); in __perf_session__new()
214 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is in __perf_session__new()
217 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps && in __perf_session__new()
218 tool->ordered_events && !evlist__sample_id_all(session->evlist)) { in __perf_session__new()
220 tool->ordered_events = false; in __perf_session__new()
240 next = decomp->next; in perf_decomp__release_events()
241 mmap_len = decomp->mmap_len; in perf_decomp__release_events()
251 auxtrace_index__free(&session->auxtrace_index); in perf_session__delete()
254 perf_decomp__release_events(session->decomp_data.decomp); in perf_session__delete()
255 perf_env__exit(&session->header.env); in perf_session__delete()
256 machines__exit(&session->machines); in perf_session__delete()
257 if (session->data) { in perf_session__delete()
258 if (perf_data__is_read(session->data)) in perf_session__delete()
259 evlist__delete(session->evlist); in perf_session__delete()
260 perf_data__close(session->data); in perf_session__delete()
263 trace_event__cleanup(&session->tevent); in perf_session__delete()
270 void *end = (void *) event + event->header.size; in swap_sample_id_all()
271 int size = end - data; in swap_sample_id_all()
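Source lines 270-271 show the size computation that the swap helpers below rely on: when sample_id_all is in use, a block of u64 identifiers trails the type-specific payload, and its length is simply the distance from the end of that payload to event->header.size. A standalone sketch of the same computation on a toy record follows; the struct and helper names are invented for the sketch, not taken from perf.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <byteswap.h>

/* Toy record laid out like a perf event: header, fixed payload, id trailer. */
struct toy_header { uint32_t type; uint16_t misc; uint16_t size; };

struct toy_event {
	struct toy_header header;
	uint32_t pid, tid;	/* fixed, type-specific payload          */
	uint64_t ids[2];	/* trailing sample_id_all-style id block */
};

/* Byte-swap every u64 between 'tail' and the end of the record. */
static void swap_trailing_ids(struct toy_event *ev, void *tail)
{
	char *end = (char *)ev + ev->header.size;
	size_t nr = (size_t)(end - (char *)tail) / sizeof(uint64_t);
	uint64_t *p = tail;

	for (size_t i = 0; i < nr; i++)
		p[i] = bswap_64(p[i]);
}

int main(void)
{
	struct toy_event ev = {
		.header = { .type = 1, .size = sizeof(ev) },
		.pid = 1, .tid = 2,
		.ids = { 0x0102030405060708ULL, 0x1112131415161718ULL },
	};

	/* As in the listing, the trailer starts right after the fixed payload. */
	swap_trailing_ids(&ev, &ev.tid + 1);
	printf("%#llx %#llx\n",
	       (unsigned long long)ev.ids[0], (unsigned long long)ev.ids[1]);
	return 0;
}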
280 struct perf_event_header *hdr = &event->header; in perf_event__all64_swap()
281 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); in perf_event__all64_swap()
286 event->comm.pid = bswap_32(event->comm.pid); in perf_event__comm_swap()
287 event->comm.tid = bswap_32(event->comm.tid); in perf_event__comm_swap()
290 void *data = &event->comm.comm; in perf_event__comm_swap()
300 event->mmap.pid = bswap_32(event->mmap.pid); in perf_event__mmap_swap()
301 event->mmap.tid = bswap_32(event->mmap.tid); in perf_event__mmap_swap()
302 event->mmap.start = bswap_64(event->mmap.start); in perf_event__mmap_swap()
303 event->mmap.len = bswap_64(event->mmap.len); in perf_event__mmap_swap()
304 event->mmap.pgoff = bswap_64(event->mmap.pgoff); in perf_event__mmap_swap()
307 void *data = &event->mmap.filename; in perf_event__mmap_swap()
317 event->mmap2.pid = bswap_32(event->mmap2.pid); in perf_event__mmap2_swap()
318 event->mmap2.tid = bswap_32(event->mmap2.tid); in perf_event__mmap2_swap()
319 event->mmap2.start = bswap_64(event->mmap2.start); in perf_event__mmap2_swap()
320 event->mmap2.len = bswap_64(event->mmap2.len); in perf_event__mmap2_swap()
321 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); in perf_event__mmap2_swap()
323 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) { in perf_event__mmap2_swap()
324 event->mmap2.maj = bswap_32(event->mmap2.maj); in perf_event__mmap2_swap()
325 event->mmap2.min = bswap_32(event->mmap2.min); in perf_event__mmap2_swap()
326 event->mmap2.ino = bswap_64(event->mmap2.ino); in perf_event__mmap2_swap()
327 event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation); in perf_event__mmap2_swap()
331 void *data = &event->mmap2.filename; in perf_event__mmap2_swap()
339 event->fork.pid = bswap_32(event->fork.pid); in perf_event__task_swap()
340 event->fork.tid = bswap_32(event->fork.tid); in perf_event__task_swap()
341 event->fork.ppid = bswap_32(event->fork.ppid); in perf_event__task_swap()
342 event->fork.ptid = bswap_32(event->fork.ptid); in perf_event__task_swap()
343 event->fork.time = bswap_64(event->fork.time); in perf_event__task_swap()
346 swap_sample_id_all(event, &event->fork + 1); in perf_event__task_swap()
351 event->read.pid = bswap_32(event->read.pid); in perf_event__read_swap()
352 event->read.tid = bswap_32(event->read.tid); in perf_event__read_swap()
353 event->read.value = bswap_64(event->read.value); in perf_event__read_swap()
354 event->read.time_enabled = bswap_64(event->read.time_enabled); in perf_event__read_swap()
355 event->read.time_running = bswap_64(event->read.time_running); in perf_event__read_swap()
356 event->read.id = bswap_64(event->read.id); in perf_event__read_swap()
359 swap_sample_id_all(event, &event->read + 1); in perf_event__read_swap()
364 event->aux.aux_offset = bswap_64(event->aux.aux_offset); in perf_event__aux_swap()
365 event->aux.aux_size = bswap_64(event->aux.aux_size); in perf_event__aux_swap()
366 event->aux.flags = bswap_64(event->aux.flags); in perf_event__aux_swap()
369 swap_sample_id_all(event, &event->aux + 1); in perf_event__aux_swap()
375 event->itrace_start.pid = bswap_32(event->itrace_start.pid); in perf_event__itrace_start_swap()
376 event->itrace_start.tid = bswap_32(event->itrace_start.tid); in perf_event__itrace_start_swap()
379 swap_sample_id_all(event, &event->itrace_start + 1); in perf_event__itrace_start_swap()
384 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { in perf_event__switch_swap()
385 event->context_switch.next_prev_pid = in perf_event__switch_swap()
386 bswap_32(event->context_switch.next_prev_pid); in perf_event__switch_swap()
387 event->context_switch.next_prev_tid = in perf_event__switch_swap()
388 bswap_32(event->context_switch.next_prev_tid); in perf_event__switch_swap()
392 swap_sample_id_all(event, &event->context_switch + 1); in perf_event__switch_swap()
397 event->text_poke.addr = bswap_64(event->text_poke.addr); in perf_event__text_poke_swap()
398 event->text_poke.old_len = bswap_16(event->text_poke.old_len); in perf_event__text_poke_swap()
399 event->text_poke.new_len = bswap_16(event->text_poke.new_len); in perf_event__text_poke_swap()
402 size_t len = sizeof(event->text_poke.old_len) + in perf_event__text_poke_swap()
403 sizeof(event->text_poke.new_len) + in perf_event__text_poke_swap()
404 event->text_poke.old_len + in perf_event__text_poke_swap()
405 event->text_poke.new_len; in perf_event__text_poke_swap()
406 void *data = &event->text_poke.old_len; in perf_event__text_poke_swap()
416 event->throttle.time = bswap_64(event->throttle.time); in perf_event__throttle_swap()
417 event->throttle.id = bswap_64(event->throttle.id); in perf_event__throttle_swap()
418 event->throttle.stream_id = bswap_64(event->throttle.stream_id); in perf_event__throttle_swap()
421 swap_sample_id_all(event, &event->throttle + 1); in perf_event__throttle_swap()
429 event->namespaces.pid = bswap_32(event->namespaces.pid); in perf_event__namespaces_swap()
430 event->namespaces.tid = bswap_32(event->namespaces.tid); in perf_event__namespaces_swap()
431 event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces); in perf_event__namespaces_swap()
433 for (i = 0; i < event->namespaces.nr_namespaces; i++) { in perf_event__namespaces_swap()
434 struct perf_ns_link_info *ns = &event->namespaces.link_info[i]; in perf_event__namespaces_swap() local
436 ns->dev = bswap_64(ns->dev); in perf_event__namespaces_swap()
437 ns->ino = bswap_64(ns->ino); in perf_event__namespaces_swap()
441 swap_sample_id_all(event, &event->namespaces.link_info[i]); in perf_event__namespaces_swap()
446 event->cgroup.id = bswap_64(event->cgroup.id); in perf_event__cgroup_swap()
449 void *data = &event->cgroup.path; in perf_event__cgroup_swap()
468 * Bit-fields are allocated from right to left (least to most significant)
469 * on little-endian implementations and from left to right (most to least
470 * significant) on big-endian implementations.
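The comment above (source lines 468-470) explains why a plain byte swap cannot carry the perf_event_attr flag bit-fields across endianness: the two ABIs allocate bit-fields in opposite directions, so the code falls back to reversing the bit order inside each byte of the flags area (the swap_bitfield() call at line 526). Below is a minimal, self-contained sketch of that per-byte reversal; the helper names reverse_byte_bits and fixup_bitfield_bytes are invented for the illustration and are not taken from the listing.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Reverse the bit order within a single byte. */
static uint8_t reverse_byte_bits(uint8_t b)
{
	uint8_t r = 0;

	for (int i = 0; i < 8; i++)
		r |= ((b >> i) & 1) << (7 - i);
	return r;
}

/* Apply the per-byte reversal to a run of bytes holding bit-fields. */
static void fixup_bitfield_bytes(uint8_t *p, size_t len)
{
	for (size_t i = 0; i < len; i++)
		p[i] = reverse_byte_bits(p[i]);
}

int main(void)
{
	uint8_t flags[2] = { 0x01, 0x80 };	/* bit 0 of byte 0, bit 7 of byte 1 */

	fixup_bitfield_bytes(flags, sizeof(flags));
	printf("%#x %#x\n", flags[0], flags[1]);	/* prints 0x80 0x1 */
	return 0;
}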
491 attr->type = bswap_32(attr->type); in perf_event__attr_swap()
492 attr->size = bswap_32(attr->size); in perf_event__attr_swap()
495 (attr->size > (offsetof(struct perf_event_attr, f) + \ in perf_event__attr_swap()
496 sizeof(attr->f) * (n))) in perf_event__attr_swap()
500 attr->f = bswap_##sz(attr->f); \ in perf_event__attr_swap()
526 swap_bitfield((u8 *) (&attr->read_format + 1), in perf_event__attr_swap()
539 perf_event__attr_swap(&event->attr.attr); in perf_event__hdr_attr_swap()
541 size = event->header.size; in perf_event__hdr_attr_swap()
542 size -= perf_record_header_attr_id(event) - (void *)event; in perf_event__hdr_attr_swap()
549 event->event_update.type = bswap_64(event->event_update.type); in perf_event__event_update_swap()
550 event->event_update.id = bswap_64(event->event_update.id); in perf_event__event_update_swap()
556 event->event_type.event_type.event_id = in perf_event__event_type_swap()
557 bswap_64(event->event_type.event_type.event_id); in perf_event__event_type_swap()
563 event->tracing_data.size = bswap_32(event->tracing_data.size); in perf_event__tracing_data_swap()
571 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); in perf_event__auxtrace_info_swap()
573 size = event->header.size; in perf_event__auxtrace_info_swap()
574 size -= (void *)&event->auxtrace_info.priv - (void *)event; in perf_event__auxtrace_info_swap()
575 mem_bswap_64(event->auxtrace_info.priv, size); in perf_event__auxtrace_info_swap()
581 event->auxtrace.size = bswap_64(event->auxtrace.size); in perf_event__auxtrace_swap()
582 event->auxtrace.offset = bswap_64(event->auxtrace.offset); in perf_event__auxtrace_swap()
583 event->auxtrace.reference = bswap_64(event->auxtrace.reference); in perf_event__auxtrace_swap()
584 event->auxtrace.idx = bswap_32(event->auxtrace.idx); in perf_event__auxtrace_swap()
585 event->auxtrace.tid = bswap_32(event->auxtrace.tid); in perf_event__auxtrace_swap()
586 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); in perf_event__auxtrace_swap()
592 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); in perf_event__auxtrace_error_swap()
593 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); in perf_event__auxtrace_error_swap()
594 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); in perf_event__auxtrace_error_swap()
595 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); in perf_event__auxtrace_error_swap()
596 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); in perf_event__auxtrace_error_swap()
597 event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt); in perf_event__auxtrace_error_swap()
598 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); in perf_event__auxtrace_error_swap()
599 if (event->auxtrace_error.fmt) in perf_event__auxtrace_error_swap()
600 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time); in perf_event__auxtrace_error_swap()
601 if (event->auxtrace_error.fmt >= 2) { in perf_event__auxtrace_error_swap()
602 event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid); in perf_event__auxtrace_error_swap()
603 event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu); in perf_event__auxtrace_error_swap()
612 event->thread_map.nr = bswap_64(event->thread_map.nr); in perf_event__thread_map_swap()
614 for (i = 0; i < event->thread_map.nr; i++) in perf_event__thread_map_swap()
615 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); in perf_event__thread_map_swap()
621 struct perf_record_cpu_map_data *data = &event->cpu_map.data; in perf_event__cpu_map_swap()
623 data->type = bswap_16(data->type); in perf_event__cpu_map_swap()
625 switch (data->type) { in perf_event__cpu_map_swap()
627 data->cpus_data.nr = bswap_16(data->cpus_data.nr); in perf_event__cpu_map_swap()
629 for (unsigned i = 0; i < data->cpus_data.nr; i++) in perf_event__cpu_map_swap()
630 data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]); in perf_event__cpu_map_swap()
633 data->mask32_data.long_size = bswap_16(data->mask32_data.long_size); in perf_event__cpu_map_swap()
635 switch (data->mask32_data.long_size) { in perf_event__cpu_map_swap()
637 data->mask32_data.nr = bswap_16(data->mask32_data.nr); in perf_event__cpu_map_swap()
638 for (unsigned i = 0; i < data->mask32_data.nr; i++) in perf_event__cpu_map_swap()
639 data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]); in perf_event__cpu_map_swap()
642 data->mask64_data.nr = bswap_16(data->mask64_data.nr); in perf_event__cpu_map_swap()
643 for (unsigned i = 0; i < data->mask64_data.nr; i++) in perf_event__cpu_map_swap()
644 data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]); in perf_event__cpu_map_swap()
651 data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu); in perf_event__cpu_map_swap()
652 data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu); in perf_event__cpu_map_swap()
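The switch above (source lines 621-652) walks the different on-disk encodings of a CPU map record: an explicit CPU list, a 32- or 64-bit mask, or a start/end range. A mask encoding is decoded by scanning set bits, assuming the usual bitmap convention that CPU c is marked by bit (c % 64) of word (c / 64); the short sketch below illustrates that assumption with a toy mask and is not perf's own decoder.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* List the CPUs present in a 64-bit-per-word mask (toy decoder). */
static void print_cpus(const uint64_t *mask, size_t nr_words)
{
	for (size_t w = 0; w < nr_words; w++)
		for (unsigned bit = 0; bit < 64; bit++)
			if (mask[w] & (1ULL << bit))
				printf("cpu %zu\n", w * 64 + bit);
}

int main(void)
{
	uint64_t mask[2] = { 0x5, 0x1 };	/* CPUs 0, 2 and 64 */

	print_cpus(mask, 2);
	return 0;
}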
664 size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]); in perf_event__stat_config_swap()
666 mem_bswap_64(&event->stat_config.nr, size); in perf_event__stat_config_swap()
672 event->stat.id = bswap_64(event->stat.id); in perf_event__stat_swap()
673 event->stat.thread = bswap_32(event->stat.thread); in perf_event__stat_swap()
674 event->stat.cpu = bswap_32(event->stat.cpu); in perf_event__stat_swap()
675 event->stat.val = bswap_64(event->stat.val); in perf_event__stat_swap()
676 event->stat.ena = bswap_64(event->stat.ena); in perf_event__stat_swap()
677 event->stat.run = bswap_64(event->stat.run); in perf_event__stat_swap()
683 event->stat_round.type = bswap_64(event->stat_round.type); in perf_event__stat_round_swap()
684 event->stat_round.time = bswap_64(event->stat_round.time); in perf_event__stat_round_swap()
690 event->time_conv.time_shift = bswap_64(event->time_conv.time_shift); in perf_event__time_conv_swap()
691 event->time_conv.time_mult = bswap_64(event->time_conv.time_mult); in perf_event__time_conv_swap()
692 event->time_conv.time_zero = bswap_64(event->time_conv.time_zero); in perf_event__time_conv_swap()
694 if (event_contains(event->time_conv, time_cycles)) { in perf_event__time_conv_swap()
695 event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles); in perf_event__time_conv_swap()
696 event->time_conv.time_mask = bswap_64(event->time_conv.time_mask); in perf_event__time_conv_swap()
757 * - | 4 <--- max recorded
765 * 5 | 7 <---- max recorded
775 * - | 10
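The three comment fragments above (source lines 757, 765 and 775) are what remains of the diagram documenting the PERF_RECORD_FINISHED_ROUND flushing rule that perf_session__queue_event() feeds: the maximum timestamp recorded in pass n becomes the flush limit once pass n + 1 has been read, because any per-CPU buffer still holding older events must have surfaced them by then. The standalone simulation below illustrates the described policy only; it is not the ordered_events implementation and all names in it are invented.

#include <stdio.h>
#include <stdint.h>

/*
 * Toy model of round-based flushing: events queued during pass n are only
 * delivered after pass n + 1 completes, and then only up to the maximum
 * timestamp recorded during pass n.
 */
struct round_state {
	uint64_t prev_round_max;	/* flush limit for the current round   */
	uint64_t cur_round_max;		/* running max of the round being read */
};

static void queue_event(struct round_state *rs, uint64_t timestamp)
{
	if (timestamp > rs->cur_round_max)
		rs->cur_round_max = timestamp;
	printf("queued ts=%llu\n", (unsigned long long)timestamp);
}

static void finished_round(struct round_state *rs)
{
	/* Everything up to the previous round's max is now safe to deliver. */
	printf("flush up to ts=%llu\n", (unsigned long long)rs->prev_round_max);
	rs->prev_round_max = rs->cur_round_max;
}

int main(void)
{
	struct round_state rs = { 0, 0 };
	uint64_t pass_n[] = { 1, 2, 3, 4 };
	uint64_t pass_n1[] = { 3, 4, 5, 6, 7 };

	for (size_t i = 0; i < sizeof(pass_n) / sizeof(pass_n[0]); i++)
		queue_event(&rs, pass_n[i]);
	finished_round(&rs);	/* nothing from an earlier round to flush yet */

	for (size_t i = 0; i < sizeof(pass_n1) / sizeof(pass_n1[0]); i++)
		queue_event(&rs, pass_n1[i]);
	finished_round(&rs);	/* flushes up to 4, the max of pass n */
	return 0;
}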
792 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path); in perf_session__queue_event()
795 static void callchain__lbr_callstack_printf(struct perf_sample *sample) in callchain__lbr_callstack_printf() argument
797 struct ip_callchain *callchain = sample->callchain; in callchain__lbr_callstack_printf()
798 struct branch_stack *lbr_stack = sample->branch_stack; in callchain__lbr_callstack_printf()
799 struct branch_entry *entries = perf_sample__branch_entries(sample); in callchain__lbr_callstack_printf()
800 u64 kernel_callchain_nr = callchain->nr; in callchain__lbr_callstack_printf()
804 if (callchain->ips[i] == PERF_CONTEXT_USER) in callchain__lbr_callstack_printf()
808 if ((i != kernel_callchain_nr) && lbr_stack->nr) { in callchain__lbr_callstack_printf()
820 * "A"->"B"->"C"->"D". in callchain__lbr_callstack_printf()
822 * "C"->"D", "B"->"C", "A"->"B". in callchain__lbr_callstack_printf()
826 total_nr = i + 1 + lbr_stack->nr + 1; in callchain__lbr_callstack_printf()
833 i, callchain->ips[i]); in callchain__lbr_callstack_printf()
837 for (i = 0; i < lbr_stack->nr; i++) in callchain__lbr_callstack_printf()
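The comment at source lines 820-822 states the layout this function has to undo: a user call chain "A"->"B"->"C"->"D" is stored in the LBR with the newest branch first, as "C"->"D", "B"->"C", "A"->"B", so the leaf-to-root stack is the first entry's 'to' address followed by every entry's 'from' address. A hedged, standalone sketch of that reconstruction is below; it uses an invented toy_branch struct rather than perf's branch_entry.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for one LBR record: a branch from 'from' to 'to'. */
struct toy_branch {
	uint64_t from;
	uint64_t to;
};

/* Print the call stack, leaf first: entries[0].to, then every .from. */
static void print_callstack(const struct toy_branch *entries, size_t nr)
{
	if (!nr)
		return;

	printf("%#llx\n", (unsigned long long)entries[0].to);	/* leaf: D */
	for (size_t i = 0; i < nr; i++)
		printf("%#llx\n", (unsigned long long)entries[i].from);	/* C, B, A */
}

int main(void)
{
	/* A=0xa000, B=0xb000, C=0xc000, D=0xd000 */
	struct toy_branch entries[] = {
		{ 0xc000, 0xd000 },	/* C -> D (newest) */
		{ 0xb000, 0xc000 },	/* B -> C */
		{ 0xa000, 0xb000 },	/* A -> B (oldest) */
	};

	print_callstack(entries, sizeof(entries) / sizeof(entries[0]));
	return 0;
}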
844 struct perf_sample *sample) in callchain__printf() argument
847 struct ip_callchain *callchain = sample->callchain; in callchain__printf()
850 callchain__lbr_callstack_printf(sample); in callchain__printf()
852 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr); in callchain__printf()
854 for (i = 0; i < callchain->nr; i++) in callchain__printf()
856 i, callchain->ips[i]); in callchain__printf()
859 static void branch_stack__printf(struct perf_sample *sample, in branch_stack__printf() argument
862 struct branch_entry *entries = perf_sample__branch_entries(sample); in branch_stack__printf()
864 u64 *branch_stack_cntr = sample->branch_stack_cntr; in branch_stack__printf()
868 printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr); in branch_stack__printf()
872 * B()->C() in branch_stack__printf()
873 * A()->B() in branch_stack__printf()
879 printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1); in branch_stack__printf()
882 for (i = 0; i < sample->branch_stack->nr; i++) { in branch_stack__printf()
886 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n", in branch_stack__printf()
887 i, e->from, e->to, in branch_stack__printf()
888 (unsigned short)e->flags.cycles, in branch_stack__printf()
889 e->flags.mispred ? "M" : " ", in branch_stack__printf()
890 e->flags.predicted ? "P" : " ", in branch_stack__printf()
891 e->flags.abort ? "A" : " ", in branch_stack__printf()
892 e->flags.in_tx ? "T" : " ", in branch_stack__printf()
893 (unsigned)e->flags.reserved, in branch_stack__printf()
895 e->flags.spec ? branch_spec_desc(e->flags.spec) : ""); in branch_stack__printf()
900 i, e->to, i+1, e->from); in branch_stack__printf()
902 printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from); in branch_stack__printf()
912 sample->branch_stack->nr, br_cntr_width, br_cntr_nr); in branch_stack__printf()
913 for (i = 0; i < sample->branch_stack->nr; i++) in branch_stack__printf()
925 printf(".... %-5s 0x%016" PRIx64 "\n", in regs_dump__printf()
932 [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
933 [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
938 if (d->abi > PERF_SAMPLE_REGS_ABI_64) in regs_dump_abi()
941 return regs_abi[d->abi]; in regs_dump_abi()
946 u64 mask = regs->mask; in regs__printf()
953 regs_dump__printf(mask, regs->regs, arch); in regs__printf()
956 static void regs_user__printf(struct perf_sample *sample, const char *arch) in regs_user__printf() argument
960 if (!sample->user_regs) in regs_user__printf()
963 user_regs = perf_sample__user_regs(sample); in regs_user__printf()
965 if (user_regs->regs) in regs_user__printf()
969 static void regs_intr__printf(struct perf_sample *sample, const char *arch) in regs_intr__printf() argument
973 if (!sample->intr_regs) in regs_intr__printf()
976 intr_regs = perf_sample__intr_regs(sample); in regs_intr__printf()
978 if (intr_regs->regs) in regs_intr__printf()
985 dump->size, dump->offset); in stack_user__printf()
988 static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample) in evlist__print_tstamp() argument
992 if (event->header.type != PERF_RECORD_SAMPLE && in evlist__print_tstamp()
994 fputs("-1 -1 ", stdout); in evlist__print_tstamp()
999 printf("%u ", sample->cpu); in evlist__print_tstamp()
1002 printf("%" PRIu64 " ", sample->time); in evlist__print_tstamp()
1005 static void sample_read__printf(struct perf_sample *sample, u64 read_format) in sample_read__printf() argument
1010 printf("...... time enabled %016" PRIx64 "\n", in sample_read__printf()
1011 sample->read.time_enabled); in sample_read__printf()
1014 printf("...... time running %016" PRIx64 "\n", in sample_read__printf()
1015 sample->read.time_running); in sample_read__printf()
1018 struct sample_read_value *value = sample->read.group.values; in sample_read__printf()
1020 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); in sample_read__printf()
1022 sample_read_group__for_each(value, sample->read.group.nr, read_format) { in sample_read__printf()
1025 value->id, value->value); in sample_read__printf()
1027 printf(", lost %" PRIu64, value->lost); in sample_read__printf()
1032 sample->read.one.id, sample->read.one.value); in sample_read__printf()
1034 printf(", lost %" PRIu64, sample->read.one.lost); in sample_read__printf()
1040 u64 file_offset, struct perf_sample *sample, in dump_event() argument
1047 file_offset, file_path, event->header.size, event->header.type); in dump_event()
1050 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw) in dump_event()
1051 evlist->trace_event_sample_raw(evlist, event, sample); in dump_event()
1053 if (sample) in dump_event()
1054 evlist__print_tstamp(evlist, event, sample); in dump_event()
1057 event->header.size, perf_event__name(event->header.type)); in dump_event()
1069 struct perf_sample *sample, const char *arch) in dump_sample() argument
1078 event->header.misc, sample->pid, sample->tid, sample->ip, in dump_sample()
1079 sample->period, sample->addr); in dump_sample()
1081 sample_type = evsel->core.attr.sample_type; in dump_sample()
1084 callchain__printf(evsel, sample); in dump_sample()
1087 branch_stack__printf(sample, evsel); in dump_sample()
1090 regs_user__printf(sample, arch); in dump_sample()
1093 regs_intr__printf(sample, arch); in dump_sample()
1096 stack_user__printf(&sample->user_stack); in dump_sample()
1099 printf("... weight: %" PRIu64 "", sample->weight); in dump_sample()
1101 printf(",0x%"PRIx16"", sample->ins_lat); in dump_sample()
1102 printf(",0x%"PRIx16"", sample->weight3); in dump_sample()
1108 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); in dump_sample()
1111 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr); in dump_sample()
1114 printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str)); in dump_sample()
1117 printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str)); in dump_sample()
1120 printf("... transaction: %" PRIx64 "\n", sample->transaction); in dump_sample()
1123 sample_read__printf(sample, evsel->core.attr.read_format); in dump_sample()
1128 struct perf_record_read *read_event = &event->read; in dump_read()
1134 printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid, in dump_read()
1135 evsel__name(evsel), event->read.value); in dump_read()
1140 read_format = evsel->core.attr.read_format; in dump_read()
1143 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled); in dump_read()
1146 printf("... time running : %" PRI_lu64 "\n", read_event->time_running); in dump_read()
1149 printf("... id : %" PRI_lu64 "\n", read_event->id); in dump_read()
1152 printf("... lost : %" PRI_lu64 "\n", read_event->lost); in dump_read()
1157 struct perf_sample *sample) in machines__find_for_cpumode() argument
1160 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || in machines__find_for_cpumode()
1161 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) { in machines__find_for_cpumode()
1164 if (sample->machine_pid) in machines__find_for_cpumode()
1165 pid = sample->machine_pid; in machines__find_for_cpumode()
1166 else if (event->header.type == PERF_RECORD_MMAP in machines__find_for_cpumode()
1167 || event->header.type == PERF_RECORD_MMAP2) in machines__find_for_cpumode()
1168 pid = event->mmap.pid; in machines__find_for_cpumode()
1170 pid = sample->pid; in machines__find_for_cpumode()
1182 return &machines->host; in machines__find_for_cpumode()
1188 struct perf_sample *sample, in deliver_sample_value() argument
1193 struct perf_sample_id *sid = evlist__id2sid(evlist, v->id); in deliver_sample_value()
1198 storage = perf_sample_id__get_period_storage(sid, sample->tid, per_thread); in deliver_sample_value()
1202 sample->id = v->id; in deliver_sample_value()
1203 sample->period = v->value - *storage; in deliver_sample_value()
1204 *storage = v->value; in deliver_sample_value()
1207 if (!storage || sid->evsel == NULL) { in deliver_sample_value()
1208 ++evlist->stats.nr_unknown_id; in deliver_sample_value()
1213 * There's no reason to deliver sample in deliver_sample_value()
1216 if (!sample->period) in deliver_sample_value()
1219 evsel = container_of(sid->evsel, struct evsel, core); in deliver_sample_value()
1220 return tool->sample(tool, event, sample, evsel, machine); in deliver_sample_value()
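Source lines 1202-1204 hold the one non-obvious step in deliver_sample_value(): counter values carried by PERF_SAMPLE_READ are cumulative, so the period attributed to a sample is the difference between the new value and the last value stored for that id (per thread when the attribute asks for it). The sketch below shows only that differencing, with a plain array standing in for perf's per-id storage; the names are invented.

#include <stdio.h>
#include <stdint.h>

#define MAX_IDS 4

/* Last cumulative counter value seen for each id (toy per-id storage). */
static uint64_t last_value[MAX_IDS];

/* Convert a cumulative counter reading into the period since the last sample. */
static uint64_t value_to_period(unsigned id, uint64_t value)
{
	uint64_t period = value - last_value[id];

	last_value[id] = value;
	return period;
}

int main(void)
{
	uint64_t readings[] = { 1000, 1750, 2600 };

	for (unsigned i = 0; i < 3; i++)
		printf("period=%llu\n",
		       (unsigned long long)value_to_period(0, readings[i]));
	/* prints 1000, 750 and 850 */
	return 0;
}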
1226 struct perf_sample *sample, in deliver_sample_group() argument
1231 int ret = -EINVAL; in deliver_sample_group()
1232 struct sample_read_value *v = sample->read.group.values; in deliver_sample_group()
1234 if (tool->dont_split_sample_group) in deliver_sample_group()
1235 return deliver_sample_value(evlist, tool, event, sample, v, machine, in deliver_sample_group()
1238 sample_read_group__for_each(v, sample->read.group.nr, read_format) { in deliver_sample_group()
1239 ret = deliver_sample_value(evlist, tool, event, sample, v, in deliver_sample_group()
1249 union perf_event *event, struct perf_sample *sample, in evlist__deliver_sample() argument
1253 u64 sample_type = evsel->core.attr.sample_type; in evlist__deliver_sample()
1254 u64 read_format = evsel->core.attr.read_format; in evlist__deliver_sample()
1255 bool per_thread = perf_evsel__attr_has_per_thread_sample_period(&evsel->core); in evlist__deliver_sample()
1257 /* Standard sample delivery. */ in evlist__deliver_sample()
1259 return tool->sample(tool, event, sample, evsel, machine); in evlist__deliver_sample()
1263 return deliver_sample_group(evlist, tool, event, sample, in evlist__deliver_sample()
1266 return deliver_sample_value(evlist, tool, event, sample, in evlist__deliver_sample()
1267 &sample->read.one, machine, in evlist__deliver_sample()
1274 struct perf_sample *sample, in machines__deliver_event() argument
1281 dump_event(evlist, event, file_offset, sample, file_path); in machines__deliver_event()
1283 evsel = evlist__id2evsel(evlist, sample->id); in machines__deliver_event()
1285 machine = machines__find_for_cpumode(machines, event, sample); in machines__deliver_event()
1287 switch (event->header.type) { in machines__deliver_event()
1290 ++evlist->stats.nr_unknown_id; in machines__deliver_event()
1294 ++evlist->stats.nr_unprocessable_samples; in machines__deliver_event()
1295 dump_sample(evsel, event, sample, perf_env__arch(NULL)); in machines__deliver_event()
1298 dump_sample(evsel, event, sample, perf_env__arch(machine->env)); in machines__deliver_event()
1299 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); in machines__deliver_event()
1301 return tool->mmap(tool, event, sample, machine); in machines__deliver_event()
1303 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) in machines__deliver_event()
1304 ++evlist->stats.nr_proc_map_timeout; in machines__deliver_event()
1305 return tool->mmap2(tool, event, sample, machine); in machines__deliver_event()
1307 return tool->comm(tool, event, sample, machine); in machines__deliver_event()
1309 return tool->namespaces(tool, event, sample, machine); in machines__deliver_event()
1311 return tool->cgroup(tool, event, sample, machine); in machines__deliver_event()
1313 return tool->fork(tool, event, sample, machine); in machines__deliver_event()
1315 return tool->exit(tool, event, sample, machine); in machines__deliver_event()
1317 if (tool->lost == perf_event__process_lost) in machines__deliver_event()
1318 evlist->stats.total_lost += event->lost.lost; in machines__deliver_event()
1319 return tool->lost(tool, event, sample, machine); in machines__deliver_event()
1321 if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF) in machines__deliver_event()
1322 evlist->stats.total_dropped_samples += event->lost_samples.lost; in machines__deliver_event()
1323 else if (tool->lost_samples == perf_event__process_lost_samples) in machines__deliver_event()
1324 evlist->stats.total_lost_samples += event->lost_samples.lost; in machines__deliver_event()
1325 return tool->lost_samples(tool, event, sample, machine); in machines__deliver_event()
1328 return tool->read(tool, event, sample, evsel, machine); in machines__deliver_event()
1330 return tool->throttle(tool, event, sample, machine); in machines__deliver_event()
1332 return tool->unthrottle(tool, event, sample, machine); in machines__deliver_event()
1334 if (tool->aux == perf_event__process_aux) { in machines__deliver_event()
1335 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) in machines__deliver_event()
1336 evlist->stats.total_aux_lost += 1; in machines__deliver_event()
1337 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL) in machines__deliver_event()
1338 evlist->stats.total_aux_partial += 1; in machines__deliver_event()
1339 if (event->aux.flags & PERF_AUX_FLAG_COLLISION) in machines__deliver_event()
1340 evlist->stats.total_aux_collision += 1; in machines__deliver_event()
1342 return tool->aux(tool, event, sample, machine); in machines__deliver_event()
1344 return tool->itrace_start(tool, event, sample, machine); in machines__deliver_event()
1347 return tool->context_switch(tool, event, sample, machine); in machines__deliver_event()
1349 return tool->ksymbol(tool, event, sample, machine); in machines__deliver_event()
1351 return tool->bpf(tool, event, sample, machine); in machines__deliver_event()
1353 return tool->text_poke(tool, event, sample, machine); in machines__deliver_event()
1355 return tool->aux_output_hw_id(tool, event, sample, machine); in machines__deliver_event()
1357 ++evlist->stats.nr_unknown_events; in machines__deliver_event()
1358 return -1; in machines__deliver_event()
1368 struct perf_sample sample; in perf_session__deliver_event() local
1371 perf_sample__init(&sample, /*all=*/false); in perf_session__deliver_event()
1372 ret = evlist__parse_sample(session->evlist, event, &sample); in perf_session__deliver_event()
1374 pr_err("Can't parse sample, err = %d\n", ret); in perf_session__deliver_event()
1378 ret = auxtrace__process_event(session, event, &sample, tool); in perf_session__deliver_event()
1386 ret = machines__deliver_event(&session->machines, session->evlist, in perf_session__deliver_event()
1387 event, &sample, tool, file_offset, file_path); in perf_session__deliver_event()
1389 if (dump_trace && sample.aux_sample.size) in perf_session__deliver_event()
1390 auxtrace__dump_auxtrace_sample(session, &sample); in perf_session__deliver_event()
1392 perf_sample__exit(&sample); in perf_session__deliver_event()
1401 struct ordered_events *oe = &session->ordered_events; in perf_session__process_user_event()
1402 const struct perf_tool *tool = session->tool; in perf_session__process_user_event()
1403 struct perf_sample sample; in perf_session__process_user_event() local
1404 int fd = perf_data__fd(session->data); in perf_session__process_user_event()
1407 perf_sample__init(&sample, /*all=*/true); in perf_session__process_user_event()
1408 if ((event->header.type != PERF_RECORD_COMPRESSED && in perf_session__process_user_event()
1409 event->header.type != PERF_RECORD_COMPRESSED2) || in perf_session__process_user_event()
1411 dump_event(session->evlist, event, file_offset, &sample, file_path); in perf_session__process_user_event()
1414 switch (event->header.type) { in perf_session__process_user_event()
1416 err = tool->attr(tool, event, &session->evlist); in perf_session__process_user_event()
1423 err = tool->event_update(tool, event, &session->evlist); in perf_session__process_user_event()
1438 if (!perf_data__is_pipe(session->data)) in perf_session__process_user_event()
1440 err = tool->tracing_data(session, event); in perf_session__process_user_event()
1443 err = tool->build_id(session, event); in perf_session__process_user_event()
1446 err = tool->finished_round(tool, event, oe); in perf_session__process_user_event()
1449 err = tool->id_index(session, event); in perf_session__process_user_event()
1452 err = tool->auxtrace_info(session, event); in perf_session__process_user_event()
1460 if (!perf_data__is_pipe(session->data)) in perf_session__process_user_event()
1461 lseek(fd, file_offset + event->header.size, SEEK_SET); in perf_session__process_user_event()
1462 err = tool->auxtrace(session, event); in perf_session__process_user_event()
1466 err = tool->auxtrace_error(session, event); in perf_session__process_user_event()
1469 err = tool->thread_map(session, event); in perf_session__process_user_event()
1472 err = tool->cpu_map(session, event); in perf_session__process_user_event()
1475 err = tool->stat_config(session, event); in perf_session__process_user_event()
1478 err = tool->stat(session, event); in perf_session__process_user_event()
1481 err = tool->stat_round(session, event); in perf_session__process_user_event()
1484 session->time_conv = event->time_conv; in perf_session__process_user_event()
1485 err = tool->time_conv(session, event); in perf_session__process_user_event()
1488 err = tool->feature(session, event); in perf_session__process_user_event()
1492 err = tool->compressed(session, event, file_offset, file_path); in perf_session__process_user_event()
1494 dump_event(session->evlist, event, file_offset, &sample, file_path); in perf_session__process_user_event()
1497 err = tool->finished_init(session, event); in perf_session__process_user_event()
1500 err = tool->bpf_metadata(session, event); in perf_session__process_user_event()
1503 err = -EINVAL; in perf_session__process_user_event()
1506 perf_sample__exit(&sample); in perf_session__process_user_event()
1512 struct perf_sample *sample) in perf_session__deliver_synth_event() argument
1514 struct evlist *evlist = session->evlist; in perf_session__deliver_synth_event()
1515 const struct perf_tool *tool = session->tool; in perf_session__deliver_synth_event()
1517 events_stats__inc(&evlist->stats, event->header.type); in perf_session__deliver_synth_event()
1519 if (event->header.type >= PERF_RECORD_USER_TYPE_START) in perf_session__deliver_synth_event()
1522 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL); in perf_session__deliver_synth_event()
1541 if (attr->size != sizeof(ev.attr_id.attr.attr)) { in perf_session__deliver_synth_attr_event()
1543 return -EINVAL; in perf_session__deliver_synth_attr_event()
1553 swap = perf_event__swap_ops[event->header.type]; in event_swap()
1561 struct perf_sample *sample) in perf_session__peek_event() argument
1567 if (session->one_mmap && !session->header.needs_swap) { in perf_session__peek_event()
1568 event = file_offset - session->one_mmap_offset + in perf_session__peek_event()
1569 session->one_mmap_addr; in perf_session__peek_event()
1573 if (perf_data__is_pipe(session->data)) in perf_session__peek_event()
1574 return -1; in perf_session__peek_event()
1576 fd = perf_data__fd(session->data); in perf_session__peek_event()
1580 return -1; in perf_session__peek_event()
1582 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || in perf_session__peek_event()
1584 return -1; in perf_session__peek_event()
1588 if (session->header.needs_swap) in perf_session__peek_event()
1589 perf_event_header__bswap(&event->header); in perf_session__peek_event()
1591 if (event->header.size < hdr_sz || event->header.size > buf_sz) in perf_session__peek_event()
1592 return -1; in perf_session__peek_event()
1595 rest = event->header.size - hdr_sz; in perf_session__peek_event()
1598 return -1; in perf_session__peek_event()
1600 if (session->header.needs_swap) in perf_session__peek_event()
1601 event_swap(event, evlist__sample_id_all(session->evlist)); in perf_session__peek_event()
1605 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && in perf_session__peek_event()
1606 evlist__parse_sample(session->evlist, event, sample)) in perf_session__peek_event()
1607 return -1; in perf_session__peek_event()
1633 offset += event->header.size; in perf_session__peek_events()
1634 if (event->header.type == PERF_RECORD_AUXTRACE) in perf_session__peek_events()
1635 offset += event->auxtrace.size; in perf_session__peek_events()
1646 struct evlist *evlist = session->evlist; in perf_session__process_event()
1647 const struct perf_tool *tool = session->tool; in perf_session__process_event()
1650 if (session->header.needs_swap) in perf_session__process_event()
1653 if (event->header.type >= PERF_RECORD_HEADER_MAX) { in perf_session__process_event()
1655 if (event->header.size % sizeof(u64)) in perf_session__process_event()
1656 return -EINVAL; in perf_session__process_event()
1660 event->header.type); in perf_session__process_event()
1662 return event->header.size; in perf_session__process_event()
1665 events_stats__inc(&evlist->stats, event->header.type); in perf_session__process_event()
1667 if (event->header.type >= PERF_RECORD_USER_TYPE_START) in perf_session__process_event()
1670 if (tool->ordered_events) { in perf_session__process_event()
1671 u64 timestamp = -1ULL; in perf_session__process_event()
1674 if (ret && ret != -1) in perf_session__process_event()
1678 if (ret != -ETIME) in perf_session__process_event()
1687 hdr->type = bswap_32(hdr->type); in perf_event_header__bswap()
1688 hdr->misc = bswap_16(hdr->misc); in perf_event_header__bswap()
1689 hdr->size = bswap_16(hdr->size); in perf_event_header__bswap()
1694 return machine__findnew_thread(&session->machines.host, -1, pid); in perf_session__findnew()
1699 struct thread *thread = machine__idle_thread(&session->machines.host); in perf_session__register_idle_thread()
1703 return thread ? 0 : -1; in perf_session__register_idle_thread()
1709 const struct ordered_events *oe = &session->ordered_events; in perf_session__warn_order()
1713 evlist__for_each_entry(session->evlist, evsel) { in perf_session__warn_order()
1714 if (evsel->core.attr.write_backward) in perf_session__warn_order()
1720 if (oe->nr_unordered_events != 0) in perf_session__warn_order()
1721 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); in perf_session__warn_order()
1726 const struct events_stats *stats = &session->evlist->stats; in perf_session__warn_about_errors()
1728 if (session->tool->lost == perf_event__process_lost && in perf_session__warn_about_errors()
1729 stats->nr_events[PERF_RECORD_LOST] != 0) { in perf_session__warn_about_errors()
1732 stats->nr_events[0], in perf_session__warn_about_errors()
1733 stats->nr_events[PERF_RECORD_LOST]); in perf_session__warn_about_errors()
1736 if (session->tool->lost_samples == perf_event__process_lost_samples) { in perf_session__warn_about_errors()
1739 drop_rate = (double)stats->total_lost_samples / in perf_session__warn_about_errors()
1740 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples); in perf_session__warn_about_errors()
1743 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples, in perf_session__warn_about_errors()
1748 if (session->tool->aux == perf_event__process_aux && in perf_session__warn_about_errors()
1749 stats->total_aux_lost != 0) { in perf_session__warn_about_errors()
1751 stats->total_aux_lost, in perf_session__warn_about_errors()
1752 stats->nr_events[PERF_RECORD_AUX]); in perf_session__warn_about_errors()
1755 if (session->tool->aux == perf_event__process_aux && in perf_session__warn_about_errors()
1756 stats->total_aux_partial != 0) { in perf_session__warn_about_errors()
1764 stats->total_aux_partial, in perf_session__warn_about_errors()
1765 stats->nr_events[PERF_RECORD_AUX], in perf_session__warn_about_errors()
1772 if (session->tool->aux == perf_event__process_aux && in perf_session__warn_about_errors()
1773 stats->total_aux_collision != 0) { in perf_session__warn_about_errors()
1775 stats->total_aux_collision, in perf_session__warn_about_errors()
1776 stats->nr_events[PERF_RECORD_AUX]); in perf_session__warn_about_errors()
1779 if (stats->nr_unknown_events != 0) { in perf_session__warn_about_errors()
1784 "reporting to linux-kernel@vger.kernel.org.\n\n", in perf_session__warn_about_errors()
1785 stats->nr_unknown_events); in perf_session__warn_about_errors()
1788 if (stats->nr_unknown_id != 0) { in perf_session__warn_about_errors()
1790 stats->nr_unknown_id); in perf_session__warn_about_errors()
1793 if (stats->nr_invalid_chains != 0) { in perf_session__warn_about_errors()
1796 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", in perf_session__warn_about_errors()
1797 stats->nr_invalid_chains, in perf_session__warn_about_errors()
1798 stats->nr_events[PERF_RECORD_SAMPLE]); in perf_session__warn_about_errors()
1801 if (stats->nr_unprocessable_samples != 0) { in perf_session__warn_about_errors()
1804 stats->nr_unprocessable_samples); in perf_session__warn_about_errors()
1811 if (stats->nr_proc_map_timeout != 0) { in perf_session__warn_about_errors()
1812 ui__warning("%d map information files for pre-existing threads were\n" in perf_session__warn_about_errors()
1815 "threads by running with -v and redirecting the output\n" in perf_session__warn_about_errors()
1817 "The time limit to process proc map is too short?\n" in perf_session__warn_about_errors()
1818 "Increase it by --proc-map-timeout\n", in perf_session__warn_about_errors()
1819 stats->nr_proc_map_timeout); in perf_session__warn_about_errors()
1831 return machines__for_each_thread(&session->machines, in perf_session__flush_thread_stacks()
1842 struct ordered_events *oe = &session->ordered_events; in __perf_session__process_pipe_events()
1843 const struct perf_tool *tool = session->tool; in __perf_session__process_pipe_events()
1856 * a file name other than "-". Then we can get the total size and show in __perf_session__process_pipe_events()
1859 if (strcmp(session->data->path, "-") && session->data->file.size) { in __perf_session__process_pipe_events()
1860 ui_progress__init_size(&prog, session->data->file.size, in __perf_session__process_pipe_events()
1870 return -errno; in __perf_session__process_pipe_events()
1874 err = perf_data__read(session->data, event, in __perf_session__process_pipe_events()
1884 if (session->header.needs_swap) in __perf_session__process_pipe_events()
1885 perf_event_header__bswap(&event->header); in __perf_session__process_pipe_events()
1887 size = event->header.size; in __perf_session__process_pipe_events()
1906 if (size - sizeof(struct perf_event_header)) { in __perf_session__process_pipe_events()
1907 err = perf_data__read(session->data, p, in __perf_session__process_pipe_events()
1908 size - sizeof(struct perf_event_header)); in __perf_session__process_pipe_events()
1922 head, event->header.size, event->header.type); in __perf_session__process_pipe_events()
1923 err = -EINVAL; in __perf_session__process_pipe_events()
1954 if (!tool->no_warn) in __perf_session__process_pipe_events()
1956 ordered_events__free(&session->ordered_events); in __perf_session__process_pipe_events()
1972 if (head + sizeof(event->header) > mmap_size) in prefetch_event()
1977 perf_event_header__bswap(&event->header); in prefetch_event()
1979 event_size = event->header.size; in prefetch_event()
1985 perf_event_header__bswap(&event->header); in prefetch_event()
1988 if (event_size <= mmap_size - head % page_size) { in prefetch_event()
1994 pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:" in prefetch_event()
2003 return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL)); in fetch_mmaped_event()
2016 struct decomp *decomp = session->active_decomp->decomp_last; in __perf_session__process_decomp_events()
2021 while (decomp->head < decomp->size && !session_done()) { in __perf_session__process_decomp_events()
2022 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data, in __perf_session__process_decomp_events()
2023 session->header.needs_swap); in __perf_session__process_decomp_events()
2028 size = event->header.size; in __perf_session__process_decomp_events()
2031 (skip = perf_session__process_event(session, event, decomp->file_pos, in __perf_session__process_decomp_events()
2032 decomp->file_path)) < 0) { in __perf_session__process_decomp_events()
2034 decomp->file_pos + decomp->head, event->header.size, event->header.type); in __perf_session__process_decomp_events()
2035 return -EINVAL; in __perf_session__process_decomp_events()
2041 decomp->head += size; in __perf_session__process_decomp_events()
2089 u64 data_size = rd->data_size; in reader__init()
2090 char **mmaps = rd->mmaps; in reader__init()
2092 rd->head = rd->data_offset; in reader__init()
2093 data_size += rd->data_offset; in reader__init()
2095 rd->mmap_size = MMAP_SIZE; in reader__init()
2096 if (rd->mmap_size > data_size) { in reader__init()
2097 rd->mmap_size = data_size; in reader__init()
2102 memset(mmaps, 0, sizeof(rd->mmaps)); in reader__init()
2104 if (zstd_init(&rd->zstd_data, 0)) in reader__init()
2105 return -1; in reader__init()
2106 rd->decomp_data.zstd_decomp = &rd->zstd_data; in reader__init()
2114 perf_decomp__release_events(rd->decomp_data.decomp); in reader__release_decomp()
2115 zstd_fini(&rd->zstd_data); in reader__release_decomp()
2122 char *buf, **mmaps = rd->mmaps; in reader__mmap()
2128 if (rd->in_place_update) { in reader__mmap()
2130 } else if (session->header.needs_swap) { in reader__mmap()
2135 if (mmaps[rd->mmap_idx]) { in reader__mmap()
2136 munmap(mmaps[rd->mmap_idx], rd->mmap_size); in reader__mmap()
2137 mmaps[rd->mmap_idx] = NULL; in reader__mmap()
2140 page_offset = page_size * (rd->head / page_size); in reader__mmap()
2141 rd->file_offset += page_offset; in reader__mmap()
2142 rd->head -= page_offset; in reader__mmap()
2144 buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd, in reader__mmap()
2145 rd->file_offset); in reader__mmap()
2148 return -errno; in reader__mmap()
2150 mmaps[rd->mmap_idx] = rd->mmap_cur = buf; in reader__mmap()
2151 rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1); in reader__mmap()
2152 rd->file_pos = rd->file_offset + rd->head; in reader__mmap()
2153 if (session->one_mmap) { in reader__mmap()
2154 session->one_mmap_addr = buf; in reader__mmap()
2155 session->one_mmap_offset = rd->file_offset; in reader__mmap()
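Source lines 2140-2142 perform the alignment that mmap() requires: the file offset passed to mmap() must be a multiple of the page size, so the reader rounds its absolute position down to a page boundary and keeps the remainder in head. A tiny worked example of that split is below, with a 4096-byte page size assumed for the illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t page_size = 4096;	/* assumed for the illustration */
	uint64_t file_offset = 0, head = 10000;	/* absolute position 10000 */

	/* Round the position down to a page boundary, keep the rest in head. */
	uint64_t page_offset = page_size * (head / page_size);

	file_offset += page_offset;
	head -= page_offset;

	printf("mmap offset=%llu head=%llu\n",
	       (unsigned long long)file_offset, (unsigned long long)head);
	/* mmap offset=8192 head=1808: same absolute position, aligned offset */
	return 0;
}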
2175 event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur, in reader__read_event()
2176 session->header.needs_swap); in reader__read_event()
2183 size = event->header.size; in reader__read_event()
2185 skip = -EINVAL; in reader__read_event()
2188 (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) { in reader__read_event()
2190 rd->file_offset + rd->head, event->header.size, in reader__read_event()
2191 event->header.type, strerror(-skip)); in reader__read_event()
2199 rd->size += size; in reader__read_event()
2200 rd->head += size; in reader__read_event()
2201 rd->file_pos += size; in reader__read_event()
2216 return (rd->file_pos >= rd->data_size + rd->data_offset); in reader__eof()
2225 err = reader__init(rd, &session->one_mmap); in reader__process_events()
2229 session->active_decomp = &rd->decomp_data; in reader__process_events()
2250 session->active_decomp = &session->decomp_data; in reader__process_events()
2265 .fd = perf_data__fd(session->data), in __perf_session__process_events()
2266 .path = session->data->file.path, in __perf_session__process_events()
2267 .data_size = session->header.data_size, in __perf_session__process_events()
2268 .data_offset = session->header.data_offset, in __perf_session__process_events()
2270 .in_place_update = session->data->in_place_update, in __perf_session__process_events()
2272 struct ordered_events *oe = &session->ordered_events; in __perf_session__process_events()
2273 const struct perf_tool *tool = session->tool; in __perf_session__process_events()
2278 return -1; in __perf_session__process_events()
2295 if (!tool->no_warn) in __perf_session__process_events()
2301 ordered_events__reinit(&session->ordered_events); in __perf_session__process_events()
2304 session->one_mmap = false; in __perf_session__process_events()
2318 * data file holds per-cpu data, already sorted by kernel.
2322 struct perf_data *data = session->data; in __perf_session__process_dir_events()
2323 const struct perf_tool *tool = session->tool; in __perf_session__process_dir_events()
2326 u64 total_size = perf_data__size(session->data); in __perf_session__process_dir_events()
2332 for (i = 0; i < data->dir.nr; i++) { in __perf_session__process_dir_events()
2333 if (data->dir.files[i].size) in __perf_session__process_dir_events()
2339 return -ENOMEM; in __perf_session__process_dir_events()
2342 .fd = perf_data__fd(session->data), in __perf_session__process_dir_events()
2343 .path = session->data->file.path, in __perf_session__process_dir_events()
2344 .data_size = session->header.data_size, in __perf_session__process_dir_events()
2345 .data_offset = session->header.data_offset, in __perf_session__process_dir_events()
2347 .in_place_update = session->data->in_place_update, in __perf_session__process_dir_events()
2357 for (i = 0; i < data->dir.nr; i++) { in __perf_session__process_dir_events()
2358 if (!data->dir.files[i].size) in __perf_session__process_dir_events()
2361 .fd = data->dir.files[i].fd, in __perf_session__process_dir_events()
2362 .path = data->dir.files[i].path, in __perf_session__process_dir_events()
2363 .data_size = data->dir.files[i].size, in __perf_session__process_dir_events()
2366 .in_place_update = session->data->in_place_update, in __perf_session__process_dir_events()
2388 readers--; in __perf_session__process_dir_events()
2392 session->active_decomp = &rd[i].decomp_data; in __perf_session__process_dir_events()
2408 ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL); in __perf_session__process_dir_events()
2416 if (!tool->no_warn) in __perf_session__process_dir_events()
2423 ordered_events__reinit(&session->ordered_events); in __perf_session__process_dir_events()
2425 session->one_mmap = false; in __perf_session__process_dir_events()
2427 session->active_decomp = &session->decomp_data; in __perf_session__process_dir_events()
2438 return -ENOMEM; in perf_session__process_events()
2440 if (perf_data__is_pipe(session->data)) in perf_session__process_events()
2443 if (perf_data__is_dir(session->data) && session->data->dir.nr) in perf_session__process_events()
2453 evlist__for_each_entry(session->evlist, evsel) { in perf_session__has_traces()
2454 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) in perf_session__has_traces()
2458 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); in perf_session__has_traces()
2466 evlist__for_each_entry(session->evlist, evsel) { in perf_session__has_switch_events()
2467 if (evsel->core.attr.context_switch) in perf_session__has_switch_events()
2482 return -ENOMEM; in map__set_kallsyms_ref_reloc_sym()
2484 ref->name = strdup(symbol_name); in map__set_kallsyms_ref_reloc_sym()
2485 if (ref->name == NULL) { in map__set_kallsyms_ref_reloc_sym()
2487 return -ENOMEM; in map__set_kallsyms_ref_reloc_sym()
2490 bracket = strchr(ref->name, ']'); in map__set_kallsyms_ref_reloc_sym()
2494 ref->addr = addr; in map__set_kallsyms_ref_reloc_sym()
2498 kmap->ref_reloc_sym = ref; in map__set_kallsyms_ref_reloc_sym()
2505 return machines__fprintf_dsos(&session->machines, fp); in perf_session__fprintf_dsos()
2511 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); in perf_session__fprintf_dsos_buildid()
2519 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) in perf_session__fprintf_nr_events()
2524 ret += events_stats__fprintf(&session->evlist->stats, fp); in perf_session__fprintf_nr_events()
2534 return machine__fprintf(&session->machines.host, fp); in perf_session__fprintf()
2544 maps__fprintf(machine__kernel_maps(&session->machines.host), stderr); in perf_session__dump_kmaps()
2553 evlist__for_each_entry(session->evlist, pos) { in perf_session__find_first_evtype()
2554 if (pos->core.attr.type == type) in perf_session__find_first_evtype()
2563 int i, err = -1; in perf_session__cpu_bitmap()
2565 int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS); in perf_session__cpu_bitmap()
2575 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) { in perf_session__cpu_bitmap()
2577 "Remove -C option to proceed.\n"); in perf_session__cpu_bitmap()
2578 return -1; in perf_session__cpu_bitmap()
2585 return -1; in perf_session__cpu_bitmap()
2618 struct machine *machine = machines__findnew(&session->machines, machine_pid); in perf_session__register_guest()
2622 return -ENOMEM; in perf_session__register_guest()
2624 machine->single_address_space = session->machines.host.single_address_space; in perf_session__register_guest()
2628 return -ENOMEM; in perf_session__register_guest()
2631 machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid); in perf_session__register_guest()
2639 struct machine *machine = &session->machines.host; in perf_session__set_guest_cpu()
2643 return -ENOMEM; in perf_session__set_guest_cpu()
2653 struct evlist *evlist = session->evlist; in perf_event__process_id_index()
2654 struct perf_record_id_index *ie = &event->id_index; in perf_event__process_id_index()
2655 size_t sz = ie->header.size - sizeof(*ie); in perf_event__process_id_index()
2664 nr = ie->nr; in perf_event__process_id_index()
2667 return -EINVAL; in perf_event__process_id_index()
2674 return -EINVAL; in perf_event__process_id_index()
2685 struct id_index_entry *e = &ie->entries[i]; in perf_event__process_id_index()
2690 fprintf(stdout, " ... id: %"PRI_lu64, e->id); in perf_event__process_id_index()
2691 fprintf(stdout, " idx: %"PRI_lu64, e->idx); in perf_event__process_id_index()
2692 fprintf(stdout, " cpu: %"PRI_ld64, e->cpu); in perf_event__process_id_index()
2693 fprintf(stdout, " tid: %"PRI_ld64, e->tid); in perf_event__process_id_index()
2695 fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid); in perf_event__process_id_index()
2696 fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu); in perf_event__process_id_index()
2702 sid = evlist__id2sid(evlist, e->id); in perf_event__process_id_index()
2704 return -ENOENT; in perf_event__process_id_index()
2706 sid->idx = e->idx; in perf_event__process_id_index()
2707 sid->cpu.cpu = e->cpu; in perf_event__process_id_index()
2708 sid->tid = e->tid; in perf_event__process_id_index()
2713 sid->machine_pid = e2->machine_pid; in perf_event__process_id_index()
2714 sid->vcpu.cpu = e2->vcpu; in perf_event__process_id_index()
2716 if (!sid->machine_pid) in perf_event__process_id_index()
2719 if (sid->machine_pid != last_pid) { in perf_event__process_id_index()
2720 ret = perf_session__register_guest(session, sid->machine_pid); in perf_event__process_id_index()
2723 last_pid = sid->machine_pid; in perf_event__process_id_index()
2727 ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu); in perf_event__process_id_index()
2739 err = machine__hit_all_dsos(&session->machines.host); in perf_session__dsos_hit_all()
2743 for (nd = rb_first_cached(&session->machines.guests); nd; in perf_session__dsos_hit_all()
2757 return &session->header.env; in perf_session__env()