Lines Matching full:trace
2 * builtin-trace.c
4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
58 #include "util/trace.h"
60 #include "trace/beauty/beauty.h"
61 #include "trace-event.h"
145 struct trace {
148 	/** Sorted syscall numbers used by the trace. */
241 static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
244 if (trace->btf != NULL)
247 trace->btf = btf__load_vmlinux_btf();
249 fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" :
342 * The evsel->priv as used by 'perf trace'
783 #include "trace/beauty/generated/fsconfig_arrays.c"
973 struct btf *btf = arg->trace->btf;
995 trace__load_vmlinux_btf(arg->trace);
997 btf = arg->trace->btf;
1062 dump_data_opts.skip_names = !arg->trace->show_arg_names;
1081 static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf,
1086 if (trace->btf == NULL)
1091 syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type);
1099 return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val);
1101 return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg);
1107 static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __maybe_unused,
1133 #include "trace/beauty/eventfd.c"
1134 #include "trace/beauty/futex_op.c"
1135 #include "trace/beauty/futex_val3.c"
1136 #include "trace/beauty/mmap.c"
1137 #include "trace/beauty/mode_t.c"
1138 #include "trace/beauty/msg_flags.c"
1139 #include "trace/beauty/open_flags.c"
1140 #include "trace/beauty/perf_event_open.c"
1141 #include "trace/beauty/pid.c"
1142 #include "trace/beauty/sched_policy.c"
1143 #include "trace/beauty/seccomp.c"
1144 #include "trace/beauty/signum.c"
1145 #include "trace/beauty/socket_type.c"
1146 #include "trace/beauty/waitid_options.c"
1585 static struct thread_trace *thread_trace__new(struct trace *trace)
1591 if (trace->summary) {
1617 static struct thread_trace *thread__trace(struct thread *thread, struct trace *trace)
1625 thread__set_priv(thread, thread_trace__new(trace));
1635 color_fprintf(trace->output, PERF_COLOR_RED,
1741 struct trace *trace)
1745 if (ttrace == NULL || trace->fd_path_disabled)
1752 if (!trace->live)
1754 ++trace->stats.proc_getname;
1766 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1774 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1777 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1780 const char *path = thread__fd_path(thread, fd, trace);
1837 if (!arg->trace->vfs_getname)
1870 static bool trace__filter_duration(struct trace *trace, double t)
1872 return t < (trace->duration_filter * NSEC_PER_MSEC);
1875 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1877 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1888 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1891 return __trace__fprintf_tstamp(trace, tstamp, fp);
1912 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1916 if (trace->multiple_threads) {
1917 if (trace->show_comm)
1925 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1930 if (trace->show_tstamp)
1931 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1932 if (trace->show_duration)
1934 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1937 static int trace__process_event(struct trace *trace, struct machine *machine,
1944 color_fprintf(trace->output, PERF_COLOR_RED,
1961 struct trace *trace = container_of(tool, struct trace, tool);
1962 return trace__process_event(trace, machine, event, sample);
1983 static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1990 trace->host = machine__new_host();
1991 if (trace->host == NULL)
1996 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
2000 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
2010 static void trace__symbols__exit(struct trace *trace)
2012 machine__exit(trace->host);
2013 trace->host = NULL;
2134 static int syscall__read_info(struct syscall *sc, struct trace *trace)
2166 	 * Reading the trace point format via the sysfs node failed, so the trace point
2201 trace__load_vmlinux_btf(trace);
2229 static int trace__validate_ev_qualifier(struct trace *trace)
2234 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
2236 trace->ev_qualifier_ids.entries = malloc(nr_allocated *
2237 sizeof(trace->ev_qualifier_ids.entries[0]));
2239 if (trace->ev_qualifier_ids.entries == NULL) {
2241 trace->output);
2246 strlist__for_each_entry(pos, trace->ev_qualifier) {
2270 trace->ev_qualifier_ids.entries[nr_used++] = id;
2282 entries = realloc(trace->ev_qualifier_ids.entries,
2283 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
2286 fputs("\nError:\t Not enough memory for parsing\n", trace->output);
2289 trace->ev_qualifier_ids.entries = entries;
2291 trace->ev_qualifier_ids.entries[nr_used++] = id;
2295 trace->ev_qualifier_ids.nr = nr_used;
2296 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
2302 zfree(&trace->ev_qualifier_ids.entries);
2303 trace->ev_qualifier_ids.nr = 0;
2307 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
2311 if (trace->ev_qualifier_ids.nr == 0)
2314 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
2315 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
2318 return !trace->not_ev_qualifier;
2320 return trace->not_ev_qualifier;
2352 * in tools/perf/trace/beauty/mount_flags.c
2376 struct trace *trace, struct thread *thread)
2389 .trace = trace,
2391 .show_string_prefix = trace->show_string_prefix,
2426 if (val == 0 && !trace->show_zeros &&
2433 if (trace->show_arg_names)
2438 if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) {
2439 btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed,
2515 static struct syscall *trace__find_syscall(struct trace *trace, int e_machine, int id)
2523 if (trace->syscalls.table) {
2524 struct syscall **sc_entry = bsearch(&key, trace->syscalls.table,
2525 trace->syscalls.table_size,
2526 sizeof(trace->syscalls.table[0]),
2537 tmp = reallocarray(trace->syscalls.table, trace->syscalls.table_size + 1,
2538 sizeof(trace->syscalls.table[0]));
2544 trace->syscalls.table = tmp;
2545 trace->syscalls.table[trace->syscalls.table_size++] = sc;
2546 qsort(trace->syscalls.table, trace->syscalls.table_size, sizeof(trace->syscalls.table[0]),
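
The matched lines above show the lookup idiom 'perf trace' uses both for trace->ev_qualifier_ids (file lines 2296, 2314-2315) and for the per-trace syscall table (file lines 2523-2546): a dynamically grown array is kept sorted with qsort() so lookups can use bsearch(), and a miss triggers a reallocarray() append followed by a re-sort. A minimal self-contained sketch of that pattern, with hypothetical names (struct id_table, table_find_or_add) rather than the actual perf structures:

#define _GNU_SOURCE		/* for reallocarray() on older glibc */
#include <stdio.h>
#include <stdlib.h>

/* comparator usable by both qsort() and bsearch(), like perf's intcmp */
static int int_cmp(const void *a, const void *b)
{
	int ia = *(const int *)a, ib = *(const int *)b;

	return (ia > ib) - (ia < ib);
}

struct id_table {
	int *entries;
	size_t nr;
};

/* return the slot for 'id', growing and re-sorting the table on a miss */
static int *table_find_or_add(struct id_table *t, int id)
{
	int *slot = NULL, *tmp;

	if (t->nr > 0)
		slot = bsearch(&id, t->entries, t->nr, sizeof(int), int_cmp);
	if (slot != NULL)
		return slot;

	tmp = reallocarray(t->entries, t->nr + 1, sizeof(int));
	if (tmp == NULL)
		return NULL;

	t->entries = tmp;
	t->entries[t->nr++] = id;
	/* keep the array sorted so the next bsearch() still works */
	qsort(t->entries, t->nr, sizeof(int), int_cmp);
	return bsearch(&id, t->entries, t->nr, sizeof(int), int_cmp);
}

int main(void)
{
	struct id_table t = { NULL, 0 };
	int ids[] = { 42, 3, 42, 17 };

	for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		table_find_or_add(&t, ids[i]);

	for (size_t i = 0; i < t.nr; i++)
		printf("%d\n", t.entries[i]);	/* prints 3, 17, 42 */

	free(t.entries);
	return 0;
}

Keeping the array sorted makes every lookup O(log n) at the price of an O(n log n) re-sort on a miss, which is cheap here because a given syscall id misses at most once.
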
2551 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2555 static struct syscall *trace__syscall_info(struct trace *trace, struct evsel *evsel,
2575 		fprintf(trace->output, "Invalid syscall id %d, skipping (%s, %" PRIu64 ") ...\n",
2583 sc = trace__find_syscall(trace, e_machine, id);
2585 err = syscall__read_info(sc, trace);
2590 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err,
2593 fprintf(trace->output, "(%s)", sc->name);
2594 fputs(" information\n", trace->output);
2608 struct trace *trace)
2614 if (trace->summary_bpf)
2617 if (trace->summary_mode == SUMMARY__BY_TOTAL)
2618 syscall_stats = trace->syscall_stats;
2640 if (!trace->errno_summary)
2664 static int trace__printf_interrupted_entry(struct trace *trace)
2670 if (trace->failure_only || trace->current == NULL)
2673 ttrace = thread__priv(trace->current);
2678 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2679 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2681 if (len < trace->args_alignment - 4)
2682 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2684 printed += fprintf(trace->output, " ...\n");
2687 ++trace->nr_events_printed;
2692 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2697 if (trace->print_sample) {
2700 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2748 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2762 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2763 e_machine = thread__e_machine(thread, trace->host);
2764 sc = trace__syscall_info(trace, evsel, e_machine, id);
2767 ttrace = thread__trace(thread, trace);
2771 trace__fprintf_sample(trace, evsel, sample, thread);
2781 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2782 trace__printf_interrupted_entry(trace);
2793 if (evsel != trace->syscalls.events.sys_enter)
2794 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2800 args, augmented_args, augmented_args_size, trace, thread);
2803 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2806 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2807 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2808 if (trace->args_alignment > printed)
2809 alignment = trace->args_alignment - printed;
2810 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2818 if (trace->current != thread) {
2819 thread__put(trace->current);
2820 trace->current = thread__get(thread);
2828 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2841 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2842 e_machine = thread__e_machine(thread, trace->host);
2843 sc = trace__syscall_info(trace, evsel, e_machine, id);
2846 ttrace = thread__trace(thread, trace);
2855 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2856 printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2857 fprintf(trace->output, "%.*s", (int)printed, msg);
2864 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2871 trace->max_stack;
2875 if (machine__resolve(trace->host, &al, sample) < 0)
2884 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2891 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
2901 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2910 int alignment = trace->args_alignment, e_machine;
2914 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2915 e_machine = thread__e_machine(thread, trace->host);
2916 sc = trace__syscall_info(trace, evsel, e_machine, id);
2919 ttrace = thread__trace(thread, trace);
2923 trace__fprintf_sample(trace, evsel, sample, thread);
2927 if (trace->summary)
2928 thread__update_stats(thread, ttrace, id, sample, ret, trace);
2930 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2933 ++trace->stats.vfs_getname;
2938 if (trace__filter_duration(trace, duration))
2941 } else if (trace->duration_filter)
2947 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
2949 if (cursor->nr < trace->min_stack)
2955 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2958 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2961 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2963 printed += fprintf(trace->output, " ... [");
2964 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2966 printed += fprintf(trace->output, "]: %s()", sc->name);
2976 fprintf(trace->output, ")%*s= ", alignment, " ");
2982 fprintf(trace->output, "%ld", ret);
2989 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2992 fprintf(trace->output, "0 (Timeout)");
2998 .trace = trace,
3002 fprintf(trace->output, "%s", bf);
3004 fprintf(trace->output, "%#lx", ret);
3006 struct thread *child = machine__find_thread(trace->host, ret, ret);
3008 fprintf(trace->output, "%ld", ret);
3011 fprintf(trace->output, " (%s)", thread__comm_str(child));
3017 fputc('\n', trace->output);
3023 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
3027 trace__fprintf_callchain(trace, sample);
3038 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
3042 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3099 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
3105 struct thread *thread = machine__findnew_thread(trace->host,
3108 struct thread_trace *ttrace = thread__trace(thread, trace);
3114 trace->runtime_ms += runtime_ms;
3120 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
3153 static void bpf_output__fprintf(struct trace *trace,
3157 bpf_output__printer, NULL, trace->output);
3158 ++trace->nr_events_printed;
3161 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
3179 .trace = trace,
3181 .show_string_prefix = trace->show_string_prefix,
3211 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE)
3216 if (trace->show_arg_names)
3219 btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->type);
3228 return fprintf(trace->output, "%.*s", (int)printed, bf);
3231 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
3241 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3246 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
3248 if (cursor->nr < trace->min_stack)
3254 trace__printf_interrupted_entry(trace);
3255 trace__fprintf_tstamp(trace, sample->time, trace->output);
3257 if (trace->trace_syscalls && trace->show_duration)
3258 fprintf(trace->output, "( ): ");
3261 trace__fprintf_comm_tid(trace, thread, trace->output);
3263 if (evsel == trace->syscalls.events.bpf_output) {
3265 int e_machine = thread ? thread__e_machine(thread, trace->host) : EM_HOST;
3266 struct syscall *sc = trace__syscall_info(trace, evsel, e_machine, id);
3269 fprintf(trace->output, "%s(", sc->name);
3270 trace__fprintf_sys_enter(trace, evsel, sample);
3271 fputc(')', trace->output);
3282 fprintf(trace->output, "%s(", evsel->name);
3285 bpf_output__fprintf(trace, sample);
3290 trace__fprintf_sys_enter(trace, evsel, sample))) {
3291 if (trace->libtraceevent_print) {
3294 trace->output);
3296 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
3302 fprintf(trace->output, ")\n");
3305 trace__fprintf_callchain(trace, sample);
3309 ++trace->nr_events_printed;
3337 static int trace__pgfault(struct trace *trace,
3350 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3355 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
3357 if (cursor->nr < trace->min_stack)
3363 ttrace = thread__trace(thread, trace);
3369 trace->pfmaj++;
3372 trace->pfmin++;
3375 if (trace->summary_only)
3380 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
3382 fprintf(trace->output, "%sfault [",
3386 print_location(trace->output, sample, &al, false, true);
3388 fprintf(trace->output, "] => ");
3401 print_location(trace->output, sample, &al, true, false);
3403 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
3406 trace__fprintf_callchain(trace, sample);
3410 ++trace->nr_events_printed;
3419 static void trace__set_base_time(struct trace *trace,
3431 if (trace->base_time == 0 && !trace->full_time &&
3433 trace->base_time = sample->time;
3442 struct trace *trace = container_of(tool, struct trace, tool);
3448 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3452 trace__set_base_time(trace, evsel, sample);
3455 ++trace->nr_events;
3456 handler(trace, evsel, event, sample);
3463 static int trace__record(struct trace *trace, int argc, const char **argv)
3495 if (trace->trace_syscalls) {
3513 if (trace->trace_pgfaults & TRACE_PFMAJ)
3517 if (trace->trace_pgfaults & TRACE_PFMIN)
3531 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3532 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp);
3595 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3601 trace__process_event(trace, trace->host, event, sample);
3605 evsel = evlist__id2evsel(trace->evlist, sample->id);
3607 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3611 if (evswitch__discard(&trace->evswitch, evsel))
3614 trace__set_base_time(trace, evsel, sample);
3618 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3623 handler(trace, evsel, event, sample);
3626 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3630 static int trace__add_syscall_newtp(struct trace *trace)
3633 struct evlist *evlist = trace->evlist;
3650 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3651 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3656 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3665 trace->syscalls.events.sys_enter = sys_enter;
3666 trace->syscalls.events.sys_exit = sys_exit;
3679 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3683 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3684 trace->ev_qualifier_ids.nr,
3685 trace->ev_qualifier_ids.entries);
3690 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3691 sys_exit = trace->syscalls.events.sys_exit;
3721 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3726 if (trace->skel->obj == NULL)
3729 bpf_object__for_each_program(pos, trace->skel->obj) {
3740 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3748 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3753 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3760 prog = trace__find_bpf_program_by_title(trace, prog_name);
3770 return trace->skel->progs.syscall_unaugmented;
3773 static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id)
3775 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
3780 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3781 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
3784 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id)
3786 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
3787 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3790 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id)
3792 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
3793 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3796 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array)
3799 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key);
3808 trace__load_vmlinux_btf(trace);
3809 if (trace->btf == NULL)
3834 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name))
3881 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace,
3898 struct syscall *pair = trace__syscall_info(trace, NULL, sc->e_machine, id);
3903 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
3968 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3969 if (pair_prog == trace->skel->progs.syscall_unaugmented)
3983 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
3985 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
3986 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
3987 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter);
3994 if (!trace__syscall_enabled(trace, key))
3997 trace__init_syscall_bpf_progs(trace, e_machine, key);
4000 prog_fd = trace__bpf_prog_sys_enter_fd(trace, e_machine, key);
4004 prog_fd = trace__bpf_prog_sys_exit_fd(trace, e_machine, key);
4011 err = trace__bpf_sys_enter_beauty_map(trace, e_machine, key, (unsigned int *)beauty_array);
4049 struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key);
4060 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
4067 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
4087 static int trace__set_ev_qualifier_filter(struct trace *trace)
4089 if (trace->syscalls.events.sys_enter)
4090 return trace__set_ev_qualifier_tp_filter(trace);
4112 static int trace__set_filter_loop_pids(struct trace *trace)
4118 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
4121 struct thread *parent = machine__find_thread(trace->host,
4139 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
4140 if (!err && trace->filter_pids.map)
4141 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
4146 static int trace__set_filter_pids(struct trace *trace)
4155 if (trace->filter_pids.nr > 0) {
4156 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
4157 trace->filter_pids.entries);
4158 if (!err && trace->filter_pids.map) {
4159 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
4160 trace->filter_pids.entries);
4162 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
4163 err = trace__set_filter_loop_pids(trace);
4169 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
4171 struct evlist *evlist = trace->evlist;
4178 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
4180 trace__handle_event(trace, event, &sample);
4186 static int __trace__flush_events(struct trace *trace)
4188 u64 first = ordered_events__first_time(&trace->oe.data);
4189 u64 flush = trace->oe.last - NSEC_PER_SEC;
4193 return ordered_events__flush_time(&trace->oe.data, flush);
4198 static int trace__flush_events(struct trace *trace)
4200 return !trace->sort_events ? 0 : __trace__flush_events(trace);
4203 static int trace__deliver_event(struct trace *trace, union perf_event *event)
4207 if (!trace->sort_events)
4208 return __trace__deliver_event(trace, event);
4210 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
4214 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
4218 return trace__flush_events(trace);
4224 struct trace *trace = container_of(oe, struct trace, oe.data);
4226 return __trace__deliver_event(trace, event->event);
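
The --sort-events path above (file lines 4186-4226) queues each event with its parsed timestamp and only flushes events older than one second before the newest timestamp seen (trace->oe.last - NSEC_PER_SEC), so samples arriving out of order from different ring buffers are delivered in time order. A toy sketch of such a reordering window, using hypothetical names (window_queue, window_flush) and a fixed-size buffer instead of perf's ordered_events machinery:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NSEC_PER_SEC	1000000000ULL
#define WINDOW_MAX	64	/* enough for this bounded demo */

static uint64_t buf[WINDOW_MAX];
static size_t buf_nr;
static uint64_t last;		/* newest timestamp queued so far */

/* insertion sort keeps the buffer ordered by timestamp */
static void window_queue(uint64_t ts)
{
	size_t i = buf_nr++;

	while (i > 0 && buf[i - 1] > ts) {
		buf[i] = buf[i - 1];
		i--;
	}
	buf[i] = ts;
	if (ts > last)
		last = ts;
}

/* deliver everything strictly older than flush_ts, in order */
static void window_flush(uint64_t flush_ts)
{
	size_t n = 0;

	while (n < buf_nr && buf[n] < flush_ts)
		printf("deliver %" PRIu64 "\n", buf[n++]);
	memmove(buf, buf + n, (buf_nr - n) * sizeof(buf[0]));
	buf_nr -= n;
}

int main(void)
{
	/* timestamps arrive out of order, as if from different CPUs */
	uint64_t ts[] = { 100, 2100000300, 2100000100, 4200000000 };

	for (size_t i = 0; i < sizeof(ts) / sizeof(ts[0]); i++) {
		window_queue(ts[i]);
		if (last > NSEC_PER_SEC)
			window_flush(last - NSEC_PER_SEC);	/* rolling flush */
	}
	window_flush(UINT64_MAX);	/* final flush, like OE_FLUSH__FINAL */
	return 0;
}

The one-second slack mirrors the window at file line 4189; a wider window tolerates more cross-buffer skew at the cost of delaying output.
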
4253 static int trace__expand_filter(struct trace *trace, struct evsel *evsel)
4306 .trace = trace,
4354 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
4356 struct evlist *evlist = trace->evlist;
4363 if (trace__expand_filter(trace, evsel)) {
4372 static int trace__run(struct trace *trace, int argc, const char **argv)
4374 struct evlist *evlist = trace->evlist;
4381 trace->live = true;
4383 if (trace->summary_bpf) {
4384 if (trace_prepare_bpf_summary(trace->summary_mode) < 0)
4387 if (trace->summary_only)
4391 if (!trace->raw_augmented_syscalls) {
4392 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
4395 if (trace->trace_syscalls)
4396 trace->vfs_getname = evlist__add_vfs_getname(evlist);
4399 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
4403 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
4407 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
4411 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
4416 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
4418 if (trace->sched &&
4425 * trace -G A -e sched:*switch
4430 * trace -e sched:*switch -G A
4438 * trace -G A -e sched:*switch -G B
4446 if (trace->cgroup)
4447 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
4450 err = evlist__create_maps(evlist, &trace->opts.target);
4452 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
4456 err = trace__symbols_init(trace, evlist);
4458 fprintf(trace->output, "Problems initializing symbol libraries!\n");
4462 if (trace->summary_mode == SUMMARY__BY_TOTAL && !trace->summary_bpf) {
4463 trace->syscall_stats = alloc_syscall_stats();
4464 if (trace->syscall_stats == NULL)
4468 evlist__config(evlist, &trace->opts, &callchain_param);
4471 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
4473 fprintf(trace->output, "Couldn't run the workload!\n");
4483 if (trace->syscalls.events.bpf_output) {
4490 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
4493 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
4495 xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
4501 if (trace->skel)
4502 trace->filter_pids.map = trace->skel->maps.pids_filtered;
4504 err = trace__set_filter_pids(trace);
4509 if (trace->skel && trace->skel->progs.sys_enter) {
4514 trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
4518 if (trace->ev_qualifier_ids.nr > 0) {
4519 err = trace__set_ev_qualifier_filter(trace);
4523 if (trace->syscalls.events.sys_exit) {
4525 trace->syscalls.events.sys_exit->filter);
4541 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(EM_HOST, "close"));
4543 err = trace__expand_filters(trace, &evsel);
4546 err = evlist__apply_filters(evlist, &evsel, &trace->opts.target);
4550 if (!trace->summary_only || !trace->summary_bpf) {
4551 err = evlist__mmap(evlist, trace->opts.mmap_pages);
4556 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
4562 if (trace->opts.target.initial_delay) {
4563 usleep(trace->opts.target.initial_delay * 1000);
4567 if (trace->summary_bpf)
4570 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4583 evsel->core.attr.sample_max_stack = trace->max_stack;
4586 before = trace->nr_events;
4597 ++trace->nr_events;
4599 err = trace__deliver_event(trace, event);
4616 if (trace->nr_events == before) {
4625 if (trace__flush_events(trace))
4633 thread__zput(trace->current);
4637 if (trace->summary_bpf)
4640 if (trace->sort_events)
4641 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4644 if (trace->summary) {
4645 if (trace->summary_bpf)
4646 trace_print_bpf_summary(trace->output);
4647 else if (trace->summary_mode == SUMMARY__BY_TOTAL)
4648 trace__fprintf_total_summary(trace, trace->output);
4650 trace__fprintf_thread_summary(trace, trace->output);
4653 if (trace->show_tool_stats) {
4654 fprintf(trace->output, "Stats:\n "
4657 trace->stats.vfs_getname,
4658 trace->stats.proc_getname);
4664 delete_syscall_stats(trace->syscall_stats);
4665 trace__symbols__exit(trace);
4668 cgroup__put(trace->cgroup);
4669 trace->evlist = NULL;
4670 trace->live = false;
4691 fprintf(trace->output, "%s\n", errbuf);
4695 fprintf(trace->output,
4702 fprintf(trace->output, "Not enough memory to run!\n");
4706 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4710 static int trace__replay(struct trace *trace)
4718 .force = trace->force,
4724 perf_tool__init(&trace->tool, /*ordered_events=*/true);
4725 trace->tool.sample = trace__process_sample;
4726 trace->tool.mmap = perf_event__process_mmap;
4727 trace->tool.mmap2 = perf_event__process_mmap2;
4728 trace->tool.comm = perf_event__process_comm;
4729 trace->tool.exit = perf_event__process_exit;
4730 trace->tool.fork = perf_event__process_fork;
4731 trace->tool.attr = perf_event__process_attr;
4732 trace->tool.tracing_data = perf_event__process_tracing_data;
4733 trace->tool.build_id = perf_event__process_build_id;
4734 trace->tool.namespaces = perf_event__process_namespaces;
4736 trace->tool.ordered_events = true;
4737 trace->tool.ordering_requires_timestamps = true;
4740 trace->multiple_threads = true;
4742 session = perf_session__new(&data, &trace->tool);
4746 if (trace->opts.target.pid)
4747 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4749 if (trace->opts.target.tid)
4750 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4755 trace->host = &session->machines.host;
4762 trace->syscalls.events.sys_enter = evsel;
4775 trace->syscalls.events.sys_exit = evsel;
4793 if (trace->summary_mode == SUMMARY__BY_TOTAL) {
4794 trace->syscall_stats = alloc_syscall_stats();
4795 if (trace->syscall_stats == NULL)
4805 else if (trace->summary)
4806 trace__fprintf_thread_summary(trace, trace->output);
4809 delete_syscall_stats(trace->syscall_stats);
4865 static size_t syscall__dump_stats(struct trace *trace, int e_machine, FILE *fp,
4896 sc = trace__syscall_info(trace, /*evsel=*/NULL, e_machine, entry->syscall);
4905 if (trace->errno_summary && stats->nr_failures) {
4910 fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
4923 struct trace *trace, int e_machine, FILE *fp)
4925 return syscall__dump_stats(trace, e_machine, fp, ttrace->syscall_stats);
4928 static size_t system__dump_stats(struct trace *trace, int e_machine, FILE *fp)
4930 return syscall__dump_stats(trace, e_machine, fp, trace->syscall_stats);
4933 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4937 int e_machine = thread__e_machine(thread, trace->host);
4943 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4952 if (trace->sched)
4957 printed += thread__dump_stats(ttrace, trace, e_machine, fp);
4985 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4990 if (machine__thread_list(trace->host, &threads) == 0) {
4996 printed += trace__fprintf_thread(fp, pos->thread, trace);
5002 static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp)
5007 printed += fprintf(fp, "%lu events", trace->nr_events);
5009 if (trace->pfmaj)
5010 printed += fprintf(fp, ", %lu majfaults", trace->pfmaj);
5011 if (trace->pfmin)
5012 printed += fprintf(fp, ", %lu minfaults", trace->pfmin);
5013 if (trace->sched)
5014 printed += fprintf(fp, ", %.3f msec\n", trace->runtime_ms);
5019 printed += system__dump_stats(trace, EM_HOST, fp);
5027 struct trace *trace = opt->value;
5029 trace->duration_filter = atof(str);
5038 struct trace *trace = opt->value;
5048 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
5049 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
5051 if (trace->filter_pids.entries == NULL)
5054 trace->filter_pids.entries[0] = getpid();
5056 for (i = 1; i < trace->filter_pids.nr; ++i)
5057 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
5065 static int trace__open_output(struct trace *trace, const char *filename)
5077 trace->output = fopen(filename, "w");
5079 return trace->output == NULL ? -errno : 0;
5182 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
5190 struct trace *trace = (struct trace *)opt->value;
5203 trace->not_ev_qualifier = true;
5249 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
5250 if (trace->ev_qualifier == NULL) {
5251 fputs("Not enough memory to parse event qualifier", trace->output);
5255 if (trace__validate_ev_qualifier(trace))
5257 trace->trace_syscalls = true;
5264 .evlistp = &trace->evlist,
5283 struct trace *trace = opt->value;
5285 if (!list_empty(&trace->evlist->core.entries)) {
5287 .value = &trace->evlist,
5291 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
5299 struct trace *trace = opt->value;
5302 trace->summary_mode = SUMMARY__BY_THREAD;
5304 trace->summary_mode = SUMMARY__BY_TOTAL;
5306 trace->summary_mode = SUMMARY__BY_CGROUP;
5317 struct trace *trace = arg;
5320 if (!strcmp(var, "trace.add_events")) {
5321 trace->perfconfig_events = strdup(value);
5322 if (trace->perfconfig_events == NULL) {
5323 pr_err("Not enough memory for %s\n", "trace.add_events");
5326 } else if (!strcmp(var, "trace.show_timestamp")) {
5327 trace->show_tstamp = perf_config_bool(var, value);
5328 } else if (!strcmp(var, "trace.show_duration")) {
5329 trace->show_duration = perf_config_bool(var, value);
5330 } else if (!strcmp(var, "trace.show_arg_names")) {
5331 trace->show_arg_names = perf_config_bool(var, value);
5332 if (!trace->show_arg_names)
5333 trace->show_zeros = true;
5334 } else if (!strcmp(var, "trace.show_zeros")) {
5336 if (!trace->show_arg_names && !new_show_zeros) {
5337 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
5340 trace->show_zeros = new_show_zeros;
5341 } else if (!strcmp(var, "trace.show_prefix")) {
5342 trace->show_string_prefix = perf_config_bool(var, value);
5343 } else if (!strcmp(var, "trace.no_inherit")) {
5344 trace->opts.no_inherit = perf_config_bool(var, value);
5345 } else if (!strcmp(var, "trace.args_alignment")) {
5348 trace->args_alignment = args_alignment;
5349 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
5351 trace->libtraceevent_print = true;
5353 trace->libtraceevent_print = false;
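
The trace.show_arg_names handler above (file lines 5330-5333) forces show_zeros on whenever argument names are disabled, and the trace.show_zeros handler (file lines 5334-5340) refuses to turn it back off: once names are suppressed, an argument is identified only by its position, so zero values cannot be elided without making the output ambiguous. A plausible ~/.perfconfig fragment illustrating the constraint (the key names are the real ones matched above; the comment is explanatory):

[trace]
	show_arg_names = no
	# forced to 'yes' when show_arg_names=no: with names suppressed,
	# only position identifies an argument, so zeros must be printed
	show_zeros = yes
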
5359 static void trace__exit(struct trace *trace)
5361 strlist__delete(trace->ev_qualifier);
5362 zfree(&trace->ev_qualifier_ids.entries);
5363 if (trace->syscalls.table) {
5364 for (size_t i = 0; i < trace->syscalls.table_size; i++)
5365 syscall__delete(trace->syscalls.table[i]);
5366 zfree(&trace->syscalls.table);
5368 zfree(&trace->perfconfig_events);
5369 evlist__delete(trace->evlist);
5370 trace->evlist = NULL;
5372 btf__free(trace->btf);
5373 trace->btf = NULL;
5392 "perf trace [<options>] [<command>]",
5393 "perf trace [<options>] -- <command> [<options>]",
5394 "perf trace record [<options>] [<command>]",
5395 "perf trace record [<options>] -- <command> [<options>]",
5398 struct trace trace = {
5422 OPT_CALLBACK('e', "event", &trace, "event",
5425 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
5427 OPT_BOOLEAN(0, "comm", &trace.show_comm,
5429 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
5430 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
5434 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
5435 "trace events on existing process id"),
5436 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
5437 "trace events on existing thread id"),
5438 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
5440 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
5442 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
5444 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
5446 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
5448 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
5450 OPT_CALLBACK(0, "duration", &trace, "float",
5453 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
5455 OPT_BOOLEAN('T', "time", &trace.full_time,
5457 OPT_BOOLEAN(0, "failure", &trace.failure_only,
5459 OPT_BOOLEAN('s', "summary", &trace.summary_only,
5461 OPT_BOOLEAN('S', "with-summary", &trace.summary,
5463 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
5465 OPT_CALLBACK(0, "summary-mode", &trace, "mode",
5468 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
5469 "Trace pagefaults", parse_pagefaults, "maj"),
5470 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
5471 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
5472 OPT_CALLBACK(0, "call-graph", &trace.opts,
5475 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
5477 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
5479 OPT_ULONG(0, "max-events", &trace.max_events,
5481 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
5484 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
5488 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
5490 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
5494 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
5496 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
5499 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer"
5501 	OPT_BOOLEAN(0, "bpf-summary", &trace.summary_bpf, "Summarize syscall stats in BPF"),
5502 OPTS_EVSWITCH(&trace.evswitch),
5522 trace.evlist = evlist__new();
5524 if (trace.evlist == NULL) {
5534 * global setting. If it fails we'll get something in 'perf trace -v'
5539 err = perf_config(trace__config, &trace);
5555 * .perfconfig trace.add_events, and filter those out.
5557 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
5558 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
5559 trace.trace_syscalls = true;
5567 if (trace.perfconfig_events != NULL) {
5571 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
5573 parse_events_error__print(&parse_err, trace.perfconfig_events);
5579 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
5585 if (!trace.trace_syscalls)
5593 if (trace.summary_bpf) {
5594 if (!trace.opts.target.system_wide) {
5599 if (trace.summary_only)
5603 trace.skel = augmented_raw_syscalls_bpf__open();
5604 if (!trace.skel) {
5613 bpf_object__for_each_program(prog, trace.skel->obj) {
5614 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
5618 err = augmented_raw_syscalls_bpf__load(trace.skel);
5624 augmented_raw_syscalls_bpf__attach(trace.skel);
5625 trace__add_syscall_newtp(&trace);
5629 err = bpf__setup_bpf_output(trace.evlist);
5635 trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
5636 assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
5641 if (trace.trace_pgfaults) {
5642 trace.opts.sample_address = true;
5643 trace.opts.sample_time = true;
5646 if (trace.opts.mmap_pages == UINT_MAX)
5649 if (trace.max_stack == UINT_MAX) {
5650 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
5655 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
5656 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5662 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5667 if (trace.evlist->core.nr_entries > 0) {
5670 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5671 if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) {
5677 trace__load_vmlinux_btf(&trace);
5680 if (trace.sort_events) {
5681 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5682 ordered_events__set_copy_on_queue(&trace.oe.data, true);
5696 if (trace.syscalls.events.bpf_output) {
5697 evlist__for_each_entry(trace.evlist, evsel) {
5701 trace.raw_augmented_syscalls = true;
5705 if (trace.syscalls.events.bpf_output->priv == NULL &&
5707 struct evsel *augmented = trace.syscalls.events.bpf_output;
5754 if (trace.raw_augmented_syscalls)
5755 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
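
The size computed at file line 5755 encodes the raw_syscalls:sys_enter sample layout: the id field begins sc->id.offset bytes into the record and is followed by the six syscall arguments, so the unaugmented prefix occupies sc->id.offset + (6 + 1) * sizeof(long) bytes and whatever the BPF program appended starts immediately after. A hedged sketch of that assumed layout (field names are illustrative, not perf's actual definitions):

/* layout assumed by the (6 + 1) * sizeof(long) + sc->id.offset math */
struct raw_sys_enter_payload {
	unsigned char common[8];	/* tracepoint common fields; real size is sc->id.offset */
	long id;			/* syscall number */
	unsigned long args[6];		/* the six syscall arguments */
	unsigned char augmented[];	/* data copied by the BPF program starts here */
};

With the usual 8-byte common header and 8-byte longs this puts the augmented payload 64 bytes into the raw sample on a 64-bit host.
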
5763 err = trace__record(&trace, argc-1, &argv[1]);
5768 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5769 trace.summary_only = true;
5772 if (trace.summary_only)
5773 trace.summary = trace.summary_only;
5776 if (trace.summary) {
5778 if (trace.summary_mode == SUMMARY__NONE)
5779 trace.summary_mode = SUMMARY__BY_THREAD;
5781 if (!trace.summary_bpf && trace.summary_mode == SUMMARY__BY_CGROUP) {
5789 err = trace__open_output(&trace, output_name);
5796 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5800 err = target__validate(&trace.opts.target);
5802 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5803 fprintf(trace.output, "%s", bf);
5807 err = target__parse_uid(&trace.opts.target);
5809 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5810 fprintf(trace.output, "%s", bf);
5814 if (!argc && target__none(&trace.opts.target))
5815 trace.opts.target.system_wide = true;
5818 err = trace__replay(&trace);
5820 err = trace__run(&trace, argc, argv);
5824 fclose(trace.output);
5826 trace__exit(&trace);
5828 augmented_raw_syscalls_bpf__destroy(trace.skel);