xref: /linux/tools/perf/builtin-trace.c (revision a8aa6a6ddce9b5585f2b74f27f3feea1427fb4e7)
1 /*
2  * builtin-trace.c
3  *
4  * Builtin 'trace' command:
5  *
6  * Display a continuously updated trace of any workload, CPU, specific PID,
7  * system wide, etc.  Default format is loosely strace like, but any other
8  * event may be specified using --event.
9  *
10  * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11  *
12  * Initially based on the 'trace' prototype by Thomas Gleixner:
13  *
14  * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15  */
16 
17 #include "util/record.h"
18 #include <api/fs/tracing_path.h>
19 #ifdef HAVE_LIBBPF_SUPPORT
20 #include <bpf/bpf.h>
21 #include <bpf/libbpf.h>
22 #include <bpf/btf.h>
23 #ifdef HAVE_BPF_SKEL
24 #include "bpf_skel/augmented_raw_syscalls.skel.h"
25 #endif
26 #endif
27 #include "util/bpf_map.h"
28 #include "util/rlimit.h"
29 #include "builtin.h"
30 #include "util/cgroup.h"
31 #include "util/color.h"
32 #include "util/config.h"
33 #include "util/debug.h"
34 #include "util/dso.h"
35 #include "util/env.h"
36 #include "util/event.h"
37 #include "util/evsel.h"
38 #include "util/evsel_fprintf.h"
39 #include "util/synthetic-events.h"
40 #include "util/evlist.h"
41 #include "util/evswitch.h"
42 #include "util/mmap.h"
43 #include <subcmd/pager.h>
44 #include <subcmd/exec-cmd.h>
45 #include "util/machine.h"
46 #include "util/map.h"
47 #include "util/symbol.h"
48 #include "util/path.h"
49 #include "util/session.h"
50 #include "util/thread.h"
51 #include <subcmd/parse-options.h>
52 #include "util/strlist.h"
53 #include "util/intlist.h"
54 #include "util/thread_map.h"
55 #include "util/stat.h"
56 #include "util/tool.h"
57 #include "util/util.h"
58 #include "trace/beauty/beauty.h"
59 #include "trace-event.h"
60 #include "util/parse-events.h"
61 #include "util/tracepoint.h"
62 #include "callchain.h"
63 #include "print_binary.h"
64 #include "string2.h"
65 #include "syscalltbl.h"
66 #include "rb_resort.h"
67 #include "../perf.h"
68 #include "trace_augment.h"
69 
70 #include <errno.h>
71 #include <inttypes.h>
72 #include <poll.h>
73 #include <signal.h>
74 #include <stdlib.h>
75 #include <string.h>
76 #include <linux/err.h>
77 #include <linux/filter.h>
78 #include <linux/kernel.h>
79 #include <linux/list_sort.h>
80 #include <linux/random.h>
81 #include <linux/stringify.h>
82 #include <linux/time64.h>
83 #include <linux/zalloc.h>
84 #include <fcntl.h>
85 #include <sys/sysmacros.h>
86 
87 #include <linux/ctype.h>
88 #include <perf/mmap.h>
89 
90 #ifdef HAVE_LIBTRACEEVENT
91 #include <event-parse.h>
92 #endif
93 
94 #ifndef O_CLOEXEC
95 # define O_CLOEXEC		02000000
96 #endif
97 
98 #ifndef F_LINUX_SPECIFIC_BASE
99 # define F_LINUX_SPECIFIC_BASE	1024
100 #endif
101 
102 #define RAW_SYSCALL_ARGS_NUM	6
103 
104 /*
105  * strtoul: Go from a string to a value, e.g. for msr: MSR_FS_BASE to 0xc0000100
106  *
107  * We have to explicitly mark the direction of the data flow, i.e. whether it
108  * goes from the kernel to user space or the other way around. Since the BPF
109  * collector we have so far copies only from user to kernel space, mark the
110  * arguments that go that direction, so that we don't end up collecting the
111  * previous contents for syscall args that go from kernel to user space.
112  */
113 struct syscall_arg_fmt {
114 	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
115 	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
116 	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
117 	void	   *parm;
118 	const char *name;
119 	u16	   nr_entries; // for arrays
120 	bool	   from_user;
121 	bool	   show_zero;
122 #ifdef HAVE_LIBBPF_SUPPORT
123 	const struct btf_type *type;
124 	int	   type_id; /* used in btf_dump */
125 #endif
126 };
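/*
 * A typical entry wires up a pretty-printer and, for arguments the BPF
 * collector copies from user space, sets .from_user, e.g.:
 *
 *	{ .scnprintf = SCA_FILENAME, .from_user = true, }
 *
 * which is what the SCA_FILENAME_FROM_USER() convenience macro further down
 * expands to.
 */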
127 
128 struct syscall_fmt {
129 	const char *name;
130 	const char *alias;
131 	struct {
132 		const char *sys_enter,
133 			   *sys_exit;
134 	}	   bpf_prog_name;
135 	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
136 	u8	   nr_args;
137 	bool	   errpid;
138 	bool	   timeout;
139 	bool	   hexret;
140 };
141 
142 struct trace {
143 	struct perf_tool	tool;
144 	struct syscalltbl	*sctbl;
145 	struct {
146 		struct syscall  *table;
147 		struct {
148 			struct evsel *sys_enter,
149 				*sys_exit,
150 				*bpf_output;
151 		}		events;
152 	} syscalls;
153 #ifdef HAVE_BPF_SKEL
154 	struct augmented_raw_syscalls_bpf *skel;
155 #endif
156 #ifdef HAVE_LIBBPF_SUPPORT
157 	struct btf		*btf;
158 #endif
159 	struct record_opts	opts;
160 	struct evlist	*evlist;
161 	struct machine		*host;
162 	struct thread		*current;
163 	struct cgroup		*cgroup;
164 	u64			base_time;
165 	FILE			*output;
166 	unsigned long		nr_events;
167 	unsigned long		nr_events_printed;
168 	unsigned long		max_events;
169 	struct evswitch		evswitch;
170 	struct strlist		*ev_qualifier;
171 	struct {
172 		size_t		nr;
173 		int		*entries;
174 	}			ev_qualifier_ids;
175 	struct {
176 		size_t		nr;
177 		pid_t		*entries;
178 		struct bpf_map  *map;
179 	}			filter_pids;
180 	double			duration_filter;
181 	double			runtime_ms;
182 	struct {
183 		u64		vfs_getname,
184 				proc_getname;
185 	} stats;
186 	unsigned int		max_stack;
187 	unsigned int		min_stack;
188 	int			raw_augmented_syscalls_args_size;
189 	bool			raw_augmented_syscalls;
190 	bool			fd_path_disabled;
191 	bool			sort_events;
192 	bool			not_ev_qualifier;
193 	bool			live;
194 	bool			full_time;
195 	bool			sched;
196 	bool			multiple_threads;
197 	bool			summary;
198 	bool			summary_only;
199 	bool			errno_summary;
200 	bool			failure_only;
201 	bool			show_comm;
202 	bool			print_sample;
203 	bool			show_tool_stats;
204 	bool			trace_syscalls;
205 	bool			libtraceevent_print;
206 	bool			kernel_syscallchains;
207 	s16			args_alignment;
208 	bool			show_tstamp;
209 	bool			show_duration;
210 	bool			show_zeros;
211 	bool			show_arg_names;
212 	bool			show_string_prefix;
213 	bool			force;
214 	bool			vfs_getname;
215 	bool			force_btf;
216 	int			trace_pgfaults;
217 	char			*perfconfig_events;
218 	struct {
219 		struct ordered_events	data;
220 		u64			last;
221 	} oe;
222 };
223 
224 static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused)
225 {
226 #ifdef HAVE_LIBBPF_SUPPORT
227 	if (trace->btf != NULL)
228 		return;
229 
230 	trace->btf = btf__load_vmlinux_btf();
231 	if (verbose > 0) {
232 		fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" :
233 						    "Failed to load vmlinux BTF\n");
234 	}
235 #endif
236 }
237 
238 struct tp_field {
239 	int offset;
240 	union {
241 		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
242 		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
243 	};
244 };
245 
246 #define TP_UINT_FIELD(bits) \
247 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
248 { \
249 	u##bits value; \
250 	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
251 	return value;  \
252 }
253 
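/*
 * E.g. TP_UINT_FIELD(32) below defines tp_field__u32(), which memcpy()s a u32
 * out of the raw sample payload at field->offset; the __SWAPPED variants that
 * follow do the same plus a byte swap for cross-endian perf.data files.
 */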
254 TP_UINT_FIELD(8);
255 TP_UINT_FIELD(16);
256 TP_UINT_FIELD(32);
257 TP_UINT_FIELD(64);
258 
259 #define TP_UINT_FIELD__SWAPPED(bits) \
260 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
261 { \
262 	u##bits value; \
263 	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
264 	return bswap_##bits(value);\
265 }
266 
267 TP_UINT_FIELD__SWAPPED(16);
268 TP_UINT_FIELD__SWAPPED(32);
269 TP_UINT_FIELD__SWAPPED(64);
270 
271 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
272 {
273 	field->offset = offset;
274 
275 	switch (size) {
276 	case 1:
277 		field->integer = tp_field__u8;
278 		break;
279 	case 2:
280 		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
281 		break;
282 	case 4:
283 		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
284 		break;
285 	case 8:
286 		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
287 		break;
288 	default:
289 		return -1;
290 	}
291 
292 	return 0;
293 }
294 
295 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
296 {
297 	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
298 }
299 
300 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
301 {
302 	return sample->raw_data + field->offset;
303 }
304 
305 static int __tp_field__init_ptr(struct tp_field *field, int offset)
306 {
307 	field->offset = offset;
308 	field->pointer = tp_field__ptr;
309 	return 0;
310 }
311 
312 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
313 {
314 	return __tp_field__init_ptr(field, format_field->offset);
315 }
316 
317 struct syscall_tp {
318 	struct tp_field id;
319 	union {
320 		struct tp_field args, ret;
321 	};
322 };
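/*
 * 'args' and 'ret' can share storage: in the sys_enter tracepoint the payload
 * after 'id' is the syscall argument array, in sys_exit it is the return
 * value, and a given evsel only ever uses one of the two.
 */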
323 
324 /*
325  * The evsel->priv as used by 'perf trace'
326  * sc:	for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
327  * fmt: for all the other tracepoints
328  */
329 struct evsel_trace {
330 	struct syscall_tp	sc;
331 	struct syscall_arg_fmt  *fmt;
332 };
333 
334 static struct evsel_trace *evsel_trace__new(void)
335 {
336 	return zalloc(sizeof(struct evsel_trace));
337 }
338 
339 static void evsel_trace__delete(struct evsel_trace *et)
340 {
341 	if (et == NULL)
342 		return;
343 
344 	zfree(&et->fmt);
345 	free(et);
346 }
347 
348 /*
349  * Used with raw_syscalls:sys_{enter,exit} and with the
350  * syscalls:sys_{enter,exit}_SYSCALL tracepoints
351  */
352 static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
353 {
354 	struct evsel_trace *et = evsel->priv;
355 
356 	return &et->sc;
357 }
358 
359 static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
360 {
361 	if (evsel->priv == NULL) {
362 		evsel->priv = evsel_trace__new();
363 		if (evsel->priv == NULL)
364 			return NULL;
365 	}
366 
367 	return __evsel__syscall_tp(evsel);
368 }
369 
370 /*
371  * Used with all the other tracepoints.
372  */
373 static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
374 {
375 	struct evsel_trace *et = evsel->priv;
376 
377 	return et->fmt;
378 }
379 
380 static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
381 {
382 	struct evsel_trace *et = evsel->priv;
383 
384 	if (evsel->priv == NULL) {
385 		et = evsel->priv = evsel_trace__new();
386 
387 		if (et == NULL)
388 			return NULL;
389 	}
390 
391 	if (et->fmt == NULL) {
392 		const struct tep_event *tp_format = evsel__tp_format(evsel);
393 
394 		if (tp_format == NULL)
395 			goto out_delete;
396 
397 		et->fmt = calloc(tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
398 		if (et->fmt == NULL)
399 			goto out_delete;
400 	}
401 
402 	return __evsel__syscall_arg_fmt(evsel);
403 
404 out_delete:
405 	evsel_trace__delete(evsel->priv);
406 	evsel->priv = NULL;
407 	return NULL;
408 }
409 
410 static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
411 {
412 	struct tep_format_field *format_field = evsel__field(evsel, name);
413 
414 	if (format_field == NULL)
415 		return -1;
416 
417 	return tp_field__init_uint(field, format_field, evsel->needs_swap);
418 }
419 
420 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
421 	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
422 	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })
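/*
 * e.g. perf_evsel__init_sc_tp_uint_field(evsel, id) sets up sc->id from the
 * tracepoint's "id" field.
 */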
423 
424 static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
425 {
426 	struct tep_format_field *format_field = evsel__field(evsel, name);
427 
428 	if (format_field == NULL)
429 		return -1;
430 
431 	return tp_field__init_ptr(field, format_field);
432 }
433 
434 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
435 	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
436 	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
437 
438 static void evsel__delete_priv(struct evsel *evsel)
439 {
440 	zfree(&evsel->priv);
441 	evsel__delete(evsel);
442 }
443 
444 static int evsel__init_syscall_tp(struct evsel *evsel)
445 {
446 	struct syscall_tp *sc = evsel__syscall_tp(evsel);
447 
448 	if (sc != NULL) {
449 		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
450 		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
451 			return -ENOENT;
452 
453 		return 0;
454 	}
455 
456 	return -ENOMEM;
457 }
458 
459 static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
460 {
461 	struct syscall_tp *sc = evsel__syscall_tp(evsel);
462 
463 	if (sc != NULL) {
464 		struct tep_format_field *syscall_id = evsel__field(tp, "id");
465 		if (syscall_id == NULL)
466 			syscall_id = evsel__field(tp, "__syscall_nr");
467 		if (syscall_id == NULL ||
468 		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
469 			return -EINVAL;
470 
471 		return 0;
472 	}
473 
474 	return -ENOMEM;
475 }
476 
477 static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
478 {
479 	struct syscall_tp *sc = __evsel__syscall_tp(evsel);
480 
481 	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
482 }
483 
484 static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
485 {
486 	struct syscall_tp *sc = __evsel__syscall_tp(evsel);
487 
488 	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
489 }
490 
491 static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
492 {
493 	if (evsel__syscall_tp(evsel) != NULL) {
494 		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
495 			return -ENOENT;
496 
497 		evsel->handler = handler;
498 		return 0;
499 	}
500 
501 	return -ENOMEM;
502 }
503 
504 static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
505 {
506 	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
507 
508 	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
509 	if (IS_ERR(evsel))
510 		evsel = evsel__newtp("syscalls", direction);
511 
512 	if (IS_ERR(evsel))
513 		return NULL;
514 
515 	if (evsel__init_raw_syscall_tp(evsel, handler))
516 		goto out_delete;
517 
518 	return evsel;
519 
520 out_delete:
521 	evsel__delete_priv(evsel);
522 	return NULL;
523 }
524 
525 #define perf_evsel__sc_tp_uint(evsel, name, sample) \
526 	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
527 	   fields->name.integer(&fields->name, sample); })
528 
529 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
530 	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
531 	   fields->name.pointer(&fields->name, sample); })
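/*
 * The sample handlers then use these as, for instance,
 * perf_evsel__sc_tp_uint(evsel, id, sample) to fetch the syscall number or
 * perf_evsel__sc_tp_ptr(evsel, args, sample) to get at the raw argument block.
 */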
532 
533 size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
534 {
535 	int idx = val - sa->offset;
536 
537 	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
538 		size_t printed = scnprintf(bf, size, intfmt, val);
539 		if (show_suffix)
540 			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
541 		return printed;
542 	}
543 
544 	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
545 }
546 
547 size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
548 {
549 	int idx = val - sa->offset;
550 
551 	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
552 		size_t printed = scnprintf(bf, size, intfmt, val);
553 		if (show_prefix)
554 			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
555 		return printed;
556 	}
557 
558 	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
559 }
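/*
 * With strarray__whences further down ("SET", "CUR", "END", ... with the
 * "SEEK_" prefix), for instance, a val of 1 prints as "CUR", or "SEEK_CUR"
 * when string prefixes are shown; out-of-range values fall back to intfmt,
 * with a "SEEK_???" hint appended when prefixes are shown.
 */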
560 
561 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
562 						const char *intfmt,
563 					        struct syscall_arg *arg)
564 {
565 	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
566 }
567 
568 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
569 					      struct syscall_arg *arg)
570 {
571 	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
572 }
573 
574 #define SCA_STRARRAY syscall_arg__scnprintf_strarray
575 
576 bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
577 {
578 	return strarray__strtoul(arg->parm, bf, size, ret);
579 }
580 
581 bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
582 {
583 	return strarray__strtoul_flags(arg->parm, bf, size, ret);
584 }
585 
586 bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
587 {
588 	return strarrays__strtoul(arg->parm, bf, size, ret);
589 }
590 
591 size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
592 {
593 	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
594 }
595 
596 size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
597 {
598 	size_t printed;
599 	int i;
600 
601 	for (i = 0; i < sas->nr_entries; ++i) {
602 		struct strarray *sa = sas->entries[i];
603 		int idx = val - sa->offset;
604 
605 		if (idx >= 0 && idx < sa->nr_entries) {
606 			if (sa->entries[idx] == NULL)
607 				break;
608 			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
609 		}
610 	}
611 
612 	printed = scnprintf(bf, size, intfmt, val);
613 	if (show_prefix)
614 		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
615 	return printed;
616 }
617 
618 bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
619 {
620 	int i;
621 
622 	for (i = 0; i < sa->nr_entries; ++i) {
623 		if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
624 			*ret = sa->offset + i;
625 			return true;
626 		}
627 	}
628 
629 	return false;
630 }
631 
632 bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
633 {
634 	u64 val = 0;
635 	char *tok = bf, *sep, *end;
636 
637 	*ret = 0;
638 
639 	while (size != 0) {
640 		int toklen = size;
641 
642 		sep = memchr(tok, '|', size);
643 		if (sep != NULL) {
644 			size -= sep - tok + 1;
645 
646 			end = sep - 1;
647 			while (end > tok && isspace(*end))
648 				--end;
649 
650 			toklen = end - tok + 1;
651 		}
652 
653 		while (isspace(*tok))
654 			++tok;
655 
656 		if (isalpha(*tok) || *tok == '_') {
657 			if (!strarray__strtoul(sa, tok, toklen, &val))
658 				return false;
659 		} else
660 			val = strtoul(tok, NULL, 0);
661 
662 		*ret |= (1 << (val - 1));
663 
664 		if (sep == NULL)
665 			break;
666 		tok = sep + 1;
667 	}
668 
669 	return true;
670 }
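/*
 * Note the (1 << (val - 1)) above: with strarray__fsmount_flags, where entry
 * [1] is "CLOEXEC", the string "CLOEXEC" resolves to val 1 and gets turned
 * back into bit 0, i.e. FSMOUNT_CLOEXEC.
 */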
671 
672 bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
673 {
674 	int i;
675 
676 	for (i = 0; i < sas->nr_entries; ++i) {
677 		struct strarray *sa = sas->entries[i];
678 
679 		if (strarray__strtoul(sa, bf, size, ret))
680 			return true;
681 	}
682 
683 	return false;
684 }
685 
686 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
687 					struct syscall_arg *arg)
688 {
689 	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
690 }
691 
692 #ifndef AT_FDCWD
693 #define AT_FDCWD	-100
694 #endif
695 
696 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
697 					   struct syscall_arg *arg)
698 {
699 	int fd = arg->val;
700 	const char *prefix = "AT_FD";
701 
702 	if (fd == AT_FDCWD)
703 		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
704 
705 	return syscall_arg__scnprintf_fd(bf, size, arg);
706 }
707 
708 #define SCA_FDAT syscall_arg__scnprintf_fd_at
709 
710 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
711 					      struct syscall_arg *arg);
712 
713 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
714 
715 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
716 {
717 	return scnprintf(bf, size, "%#lx", arg->val);
718 }
719 
720 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
721 {
722 	if (arg->val == 0)
723 		return scnprintf(bf, size, "NULL");
724 	return syscall_arg__scnprintf_hex(bf, size, arg);
725 }
726 
727 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
728 {
729 	return scnprintf(bf, size, "%d", arg->val);
730 }
731 
732 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
733 {
734 	return scnprintf(bf, size, "%ld", arg->val);
735 }
736 
737 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
738 {
739 	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
740 	//     fill missing comms using thread__set_comm()...
741 	//     here or in a special syscall_arg__scnprintf_pid_sched_tp...
742 	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
743 }
744 
745 #define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
746 
747 static const char *bpf_cmd[] = {
748 	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
749 	"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
750 	"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
751 	"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
752 	"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
753 	"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
754 	"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
755 	"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
756 	"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
757 	"LINK_DETACH", "PROG_BIND_MAP",
758 };
759 static DEFINE_STRARRAY(bpf_cmd, "BPF_");
760 
761 static const char *fsmount_flags[] = {
762 	[1] = "CLOEXEC",
763 };
764 static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
765 
766 #include "trace/beauty/generated/fsconfig_arrays.c"
767 
768 static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
769 
770 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
771 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
772 
773 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
774 static DEFINE_STRARRAY(itimers, "ITIMER_");
775 
776 static const char *keyctl_options[] = {
777 	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
778 	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
779 	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
780 	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
781 	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
782 };
783 static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
784 
785 static const char *whences[] = { "SET", "CUR", "END",
786 #ifdef SEEK_DATA
787 "DATA",
788 #endif
789 #ifdef SEEK_HOLE
790 "HOLE",
791 #endif
792 };
793 static DEFINE_STRARRAY(whences, "SEEK_");
794 
795 static const char *fcntl_cmds[] = {
796 	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
797 	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
798 	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
799 	"GETOWNER_UIDS",
800 };
801 static DEFINE_STRARRAY(fcntl_cmds, "F_");
802 
803 static const char *fcntl_linux_specific_cmds[] = {
804 	"SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
805 	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
806 	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
807 };
808 
809 static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
810 
811 static struct strarray *fcntl_cmds_arrays[] = {
812 	&strarray__fcntl_cmds,
813 	&strarray__fcntl_linux_specific_cmds,
814 };
815 
816 static DEFINE_STRARRAYS(fcntl_cmds_arrays);
817 
818 static const char *rlimit_resources[] = {
819 	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
820 	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
821 	"RTTIME",
822 };
823 static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
824 
825 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
826 static DEFINE_STRARRAY(sighow, "SIG_");
827 
828 static const char *clockid[] = {
829 	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
830 	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
831 	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
832 };
833 static DEFINE_STRARRAY(clockid, "CLOCK_");
834 
835 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
836 						 struct syscall_arg *arg)
837 {
838 	bool show_prefix = arg->show_string_prefix;
839 	const char *suffix = "_OK";
840 	size_t printed = 0;
841 	int mode = arg->val;
842 
843 	if (mode == F_OK) /* 0 */
844 		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
845 #define	P_MODE(n) \
846 	if (mode & n##_OK) { \
847 		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
848 		mode &= ~n##_OK; \
849 	}
850 
851 	P_MODE(R);
852 	P_MODE(W);
853 	P_MODE(X);
854 #undef P_MODE
855 
856 	if (mode)
857 		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
858 
859 	return printed;
860 }
861 
862 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
863 
864 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
865 					      struct syscall_arg *arg);
866 
867 #define SCA_FILENAME syscall_arg__scnprintf_filename
868 
869 // 'argname' is purely for documentation at this point, replacing the comment that previously carried that info
870 #define SCA_FILENAME_FROM_USER(argname) \
871 	  { .scnprintf	= SCA_FILENAME, \
872 	    .from_user	= true, }
873 
874 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg);
875 
876 #define SCA_BUF syscall_arg__scnprintf_buf
877 
878 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
879 						struct syscall_arg *arg)
880 {
881 	bool show_prefix = arg->show_string_prefix;
882 	const char *prefix = "O_";
883 	int printed = 0, flags = arg->val;
884 
885 #define	P_FLAG(n) \
886 	if (flags & O_##n) { \
887 		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
888 		flags &= ~O_##n; \
889 	}
890 
891 	P_FLAG(CLOEXEC);
892 	P_FLAG(NONBLOCK);
893 #undef P_FLAG
894 
895 	if (flags)
896 		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
897 
898 	return printed;
899 }
900 
901 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
902 
903 #ifndef GRND_NONBLOCK
904 #define GRND_NONBLOCK	0x0001
905 #endif
906 #ifndef GRND_RANDOM
907 #define GRND_RANDOM	0x0002
908 #endif
909 
910 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
911 						   struct syscall_arg *arg)
912 {
913 	bool show_prefix = arg->show_string_prefix;
914 	const char *prefix = "GRND_";
915 	int printed = 0, flags = arg->val;
916 
917 #define	P_FLAG(n) \
918 	if (flags & GRND_##n) { \
919 		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
920 		flags &= ~GRND_##n; \
921 	}
922 
923 	P_FLAG(RANDOM);
924 	P_FLAG(NONBLOCK);
925 #undef P_FLAG
926 
927 	if (flags)
928 		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
929 
930 	return printed;
931 }
932 
933 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
934 
935 #ifdef HAVE_LIBBPF_SUPPORT
936 static void syscall_arg_fmt__cache_btf_enum(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
937 {
938 	int id;
939 
940 	type = strstr(type, "enum ");
941 	if (type == NULL)
942 		return;
943 
944 	type += 5; // skip "enum " to get the enumeration name
945 
946 	id = btf__find_by_name(btf, type);
947 	if (id < 0)
948 		return;
949 
950 	arg_fmt->type = btf__type_by_id(btf, id);
951 }
952 
953 static bool syscall_arg__strtoul_btf_enum(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
954 {
955 	const struct btf_type *bt = arg->fmt->type;
956 	struct btf *btf = arg->trace->btf;
957 	struct btf_enum *be = btf_enum(bt);
958 
959 	for (int i = 0; i < btf_vlen(bt); ++i, ++be) {
960 		const char *name = btf__name_by_offset(btf, be->name_off);
961 		int max_len = max(size, strlen(name));
962 
963 		if (strncmp(name, bf, max_len) == 0) {
964 			*val = be->val;
965 			return true;
966 		}
967 	}
968 
969 	return false;
970 }
971 
972 static bool syscall_arg__strtoul_btf_type(char *bf, size_t size, struct syscall_arg *arg, u64 *val)
973 {
974 	const struct btf_type *bt;
975 	char *type = arg->type_name;
976 	struct btf *btf;
977 
978 	trace__load_vmlinux_btf(arg->trace);
979 
980 	btf = arg->trace->btf;
981 	if (btf == NULL)
982 		return false;
983 
984 	if (arg->fmt->type == NULL) {
985 		// See if this is an enum
986 		syscall_arg_fmt__cache_btf_enum(arg->fmt, btf, type);
987 	}
988 
989 	// Now let's see if we have a BTF type resolved
990 	bt = arg->fmt->type;
991 	if (bt == NULL)
992 		return false;
993 
994 	// If it is an enum:
995 	if (btf_is_enum(arg->fmt->type))
996 		return syscall_arg__strtoul_btf_enum(bf, size, arg, val);
997 
998 	return false;
999 }
1000 
1001 static size_t btf_enum_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, int val)
1002 {
1003 	struct btf_enum *be = btf_enum(type);
1004 	const int nr_entries = btf_vlen(type);
1005 
1006 	for (int i = 0; i < nr_entries; ++i, ++be) {
1007 		if (be->val == val) {
1008 			return scnprintf(bf, size, "%s",
1009 					 btf__name_by_offset(btf, be->name_off));
1010 		}
1011 	}
1012 
1013 	return 0;
1014 }
1015 
1016 struct trace_btf_dump_snprintf_ctx {
1017 	char   *bf;
1018 	size_t printed, size;
1019 };
1020 
1021 static void trace__btf_dump_snprintf(void *vctx, const char *fmt, va_list args)
1022 {
1023 	struct trace_btf_dump_snprintf_ctx *ctx = vctx;
1024 
1025 	ctx->printed += vscnprintf(ctx->bf + ctx->printed, ctx->size - ctx->printed, fmt, args);
1026 }
1027 
1028 static size_t btf_struct_scnprintf(const struct btf_type *type, struct btf *btf, char *bf, size_t size, struct syscall_arg *arg)
1029 {
1030 	struct trace_btf_dump_snprintf_ctx ctx = {
1031 		.bf   = bf,
1032 		.size = size,
1033 	};
1034 	struct augmented_arg *augmented_arg = arg->augmented.args;
1035 	int type_id = arg->fmt->type_id, consumed;
1036 	struct btf_dump *btf_dump;
1037 
1038 	LIBBPF_OPTS(btf_dump_opts, dump_opts);
1039 	LIBBPF_OPTS(btf_dump_type_data_opts, dump_data_opts);
1040 
1041 	if (arg == NULL || arg->augmented.args == NULL)
1042 		return 0;
1043 
1044 	dump_data_opts.compact	  = true;
1045 	dump_data_opts.skip_names = !arg->trace->show_arg_names;
1046 
1047 	btf_dump = btf_dump__new(btf, trace__btf_dump_snprintf, &ctx, &dump_opts);
1048 	if (btf_dump == NULL)
1049 		return 0;
1050 
1051 	/* pretty print the struct data here */
1052 	if (btf_dump__dump_type_data(btf_dump, type_id, arg->augmented.args->value, type->size, &dump_data_opts) == 0)
1053 		return 0;
1054 
1055 	consumed = sizeof(*augmented_arg) + augmented_arg->size;
1056 	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1057 	arg->augmented.size -= consumed;
1058 
1059 	btf_dump__free(btf_dump);
1060 
1061 	return ctx.printed;
1062 }
1063 
1064 static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf,
1065 				   size_t size, int val, char *type)
1066 {
1067 	struct syscall_arg_fmt *arg_fmt = arg->fmt;
1068 
1069 	if (trace->btf == NULL)
1070 		return 0;
1071 
1072 	if (arg_fmt->type == NULL) {
1073 		// Check if this is an enum and if we have the BTF type for it.
1074 		syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type);
1075 	}
1076 
1077 	// Did we manage to find a BTF type for the syscall/tracepoint argument?
1078 	if (arg_fmt->type == NULL)
1079 		return 0;
1080 
1081 	if (btf_is_enum(arg_fmt->type))
1082 		return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val);
1083 	else if (btf_is_struct(arg_fmt->type) || btf_is_union(arg_fmt->type))
1084 		return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg);
1085 
1086 	return 0;
1087 }
1088 
1089 #else // HAVE_LIBBPF_SUPPORT
1090 static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __maybe_unused,
1091 				   char *bf __maybe_unused, size_t size __maybe_unused, int val __maybe_unused,
1092 				   char *type __maybe_unused)
1093 {
1094 	return 0;
1095 }
1096 
1097 static bool syscall_arg__strtoul_btf_type(char *bf __maybe_unused, size_t size __maybe_unused,
1098 					  struct syscall_arg *arg __maybe_unused, u64 *val __maybe_unused)
1099 {
1100 	return false;
1101 }
1102 #endif // HAVE_LIBBPF_SUPPORT
1103 
1104 #define STUL_BTF_TYPE syscall_arg__strtoul_btf_type
1105 
1106 #define STRARRAY(name, array) \
1107 	  { .scnprintf	= SCA_STRARRAY, \
1108 	    .strtoul	= STUL_STRARRAY, \
1109 	    .parm	= &strarray__##array, }
1110 
1111 #define STRARRAY_FLAGS(name, array) \
1112 	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
1113 	    .strtoul	= STUL_STRARRAY_FLAGS, \
1114 	    .parm	= &strarray__##array, }
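/*
 * Used when filling syscall_fmts[] below, e.g. the "bpf" entry has
 * [0] = STRARRAY(cmd, bpf_cmd), so its first argument is printed via
 * strarray__bpf_cmd and can be mapped back from a string via the .strtoul
 * hook.
 */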
1115 
1116 #include "trace/beauty/eventfd.c"
1117 #include "trace/beauty/futex_op.c"
1118 #include "trace/beauty/futex_val3.c"
1119 #include "trace/beauty/mmap.c"
1120 #include "trace/beauty/mode_t.c"
1121 #include "trace/beauty/msg_flags.c"
1122 #include "trace/beauty/open_flags.c"
1123 #include "trace/beauty/perf_event_open.c"
1124 #include "trace/beauty/pid.c"
1125 #include "trace/beauty/sched_policy.c"
1126 #include "trace/beauty/seccomp.c"
1127 #include "trace/beauty/signum.c"
1128 #include "trace/beauty/socket_type.c"
1129 #include "trace/beauty/waitid_options.c"
1130 
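/*
 * syscall_fmt__find() looks entries up with bsearch(), so this table has to
 * be kept sorted by ->name.
 */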
1131 static const struct syscall_fmt syscall_fmts[] = {
1132 	{ .name	    = "access",
1133 	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
1134 	{ .name	    = "arch_prctl",
1135 	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
1136 		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
1137 	{ .name	    = "bind",
1138 	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
1139 		   [1] = SCA_SOCKADDR_FROM_USER(umyaddr),
1140 		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
1141 	{ .name	    = "bpf",
1142 	  .arg = { [0] = STRARRAY(cmd, bpf_cmd),
1143 		   [1] = { .from_user = true /* attr */, }, } },
1144 	{ .name	    = "brk",	    .hexret = true,
1145 	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
1146 	{ .name     = "clock_gettime",
1147 	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
1148 	{ .name	    = "clock_nanosleep",
1149 	  .arg = { [2] = SCA_TIMESPEC_FROM_USER(req), }, },
1150 	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
1151 	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
1152 		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
1153 		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
1154 		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
1155 		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
1156 	{ .name	    = "close",
1157 	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
1158 	{ .name	    = "connect",
1159 	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
1160 		   [1] = SCA_SOCKADDR_FROM_USER(servaddr),
1161 		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
1162 	{ .name	    = "epoll_ctl",
1163 	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
1164 	{ .name	    = "eventfd2",
1165 	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
1166 	{ .name     = "faccessat",
1167 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dirfd */ },
1168 		   [1] = SCA_FILENAME_FROM_USER(pathname),
1169 		   [2] = { .scnprintf = SCA_ACCMODE,	  /* mode */ }, }, },
1170 	{ .name     = "faccessat2",
1171 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dirfd */ },
1172 		   [1] = SCA_FILENAME_FROM_USER(pathname),
1173 		   [2] = { .scnprintf = SCA_ACCMODE,	  /* mode */ },
1174 		   [3] = { .scnprintf = SCA_FACCESSAT2_FLAGS, /* flags */ }, }, },
1175 	{ .name	    = "fchmodat",
1176 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1177 	{ .name	    = "fchownat",
1178 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1179 	{ .name	    = "fcntl",
1180 	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,  /* cmd */
1181 			   .strtoul   = STUL_STRARRAYS,
1182 			   .parm      = &strarrays__fcntl_cmds_arrays,
1183 			   .show_zero = true, },
1184 		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
1185 	{ .name	    = "flock",
1186 	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
1187 	{ .name     = "fsconfig",
1188 	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
1189 	{ .name     = "fsmount",
1190 	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
1191 		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
1192 	{ .name     = "fspick",
1193 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
1194 		   [1] = SCA_FILENAME_FROM_USER(path),
1195 		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
1196 	{ .name	    = "fstat", .alias = "newfstat", },
1197 	{ .name	    = "futex",
1198 	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
1199 		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
1200 	{ .name	    = "futimesat",
1201 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1202 	{ .name	    = "getitimer",
1203 	  .arg = { [0] = STRARRAY(which, itimers), }, },
1204 	{ .name	    = "getpid",	    .errpid = true, },
1205 	{ .name	    = "getpgid",    .errpid = true, },
1206 	{ .name	    = "getppid",    .errpid = true, },
1207 	{ .name	    = "getrandom",
1208 	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
1209 	{ .name	    = "getrlimit",
1210 	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
1211 	{ .name	    = "getsockopt",
1212 	  .arg = { [1] = STRARRAY(level, socket_level), }, },
1213 	{ .name	    = "gettid",	    .errpid = true, },
1214 	{ .name	    = "ioctl",
1215 	  .arg = {
1216 #if defined(__i386__) || defined(__x86_64__)
1217 /*
1218  * FIXME: Make this available to all arches.
1219  */
1220 		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
1221 		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
1222 #else
1223 		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
1224 #endif
1225 	{ .name	    = "kcmp",	    .nr_args = 5,
1226 	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
1227 		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
1228 		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
1229 		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
1230 		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
1231 	{ .name	    = "keyctl",
1232 	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
1233 	{ .name	    = "kill",
1234 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1235 	{ .name	    = "linkat",
1236 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1237 	{ .name	    = "lseek",
1238 	  .arg = { [2] = STRARRAY(whence, whences), }, },
1239 	{ .name	    = "lstat", .alias = "newlstat", },
1240 	{ .name     = "madvise",
1241 	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
1242 		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
1243 	{ .name	    = "mkdirat",
1244 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1245 	{ .name	    = "mknodat",
1246 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1247 	{ .name	    = "mmap",	    .hexret = true,
1248 /* The standard mmap maps to old_mmap on s390x */
1249 #if defined(__s390x__)
1250 	.alias = "old_mmap",
1251 #endif
1252 	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
1253 		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */
1254 			   .strtoul   = STUL_STRARRAY_FLAGS,
1255 			   .parm      = &strarray__mmap_flags, },
1256 		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
1257 	{ .name	    = "mount",
1258 	  .arg = { [0] = SCA_FILENAME_FROM_USER(devname),
1259 		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
1260 			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
1261 	{ .name	    = "move_mount",
1262 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
1263 		   [1] = SCA_FILENAME_FROM_USER(pathname),
1264 		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
1265 		   [3] = SCA_FILENAME_FROM_USER(pathname),
1266 		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
1267 	{ .name	    = "mprotect",
1268 	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
1269 		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ }, }, },
1270 	{ .name	    = "mq_unlink",
1271 	  .arg = { [0] = SCA_FILENAME_FROM_USER(u_name), }, },
1272 	{ .name	    = "mremap",	    .hexret = true,
1273 	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
1274 	{ .name	    = "name_to_handle_at",
1275 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1276 	{ .name	    = "nanosleep",
1277 	  .arg = { [0] = SCA_TIMESPEC_FROM_USER(req), }, },
1278 	{ .name	    = "newfstatat", .alias = "fstatat",
1279 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dirfd */ },
1280 		   [1] = SCA_FILENAME_FROM_USER(pathname),
1281 		   [3] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ }, }, },
1282 	{ .name	    = "open",
1283 	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1284 	{ .name	    = "open_by_handle_at",
1285 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
1286 		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1287 	{ .name	    = "openat",
1288 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
1289 		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1290 	{ .name	    = "perf_event_open",
1291 	  .arg = { [0] = SCA_PERF_ATTR_FROM_USER(attr),
1292 		   [2] = { .scnprintf = SCA_INT,	/* cpu */ },
1293 		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
1294 		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
1295 	{ .name	    = "pipe2",
1296 	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
1297 	{ .name	    = "pkey_alloc",
1298 	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
1299 	{ .name	    = "pkey_free",
1300 	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
1301 	{ .name	    = "pkey_mprotect",
1302 	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
1303 		   [2] = { .scnprintf = SCA_MMAP_PROT, .show_zero = true, /* prot */ },
1304 		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
1305 	{ .name	    = "poll", .timeout = true, },
1306 	{ .name	    = "ppoll", .timeout = true, },
1307 	{ .name	    = "prctl",
1308 	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
1309 			   .strtoul   = STUL_STRARRAY,
1310 			   .parm      = &strarray__prctl_options, },
1311 		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
1312 		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
1313 	{ .name	    = "pread", .alias = "pread64", },
1314 	{ .name	    = "preadv", .alias = "pread", },
1315 	{ .name	    = "prlimit64",
1316 	  .arg = { [1] = STRARRAY(resource, rlimit_resources),
1317 		   [2] = { .from_user = true /* new_rlim */, }, }, },
1318 	{ .name	    = "pwrite", .alias = "pwrite64", },
1319 	{ .name	    = "readlinkat",
1320 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1321 	{ .name	    = "recvfrom",
1322 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1323 	{ .name	    = "recvmmsg",
1324 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1325 	{ .name	    = "recvmsg",
1326 	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1327 	{ .name	    = "renameat",
1328 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1329 		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
1330 	{ .name	    = "renameat2",
1331 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1332 		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
1333 		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
1334 	{ .name	    = "rseq",	    .errpid = true,
1335 	  .arg = { [0] = { .from_user = true /* rseq */, }, }, },
1336 	{ .name	    = "rt_sigaction",
1337 	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1338 	{ .name	    = "rt_sigprocmask",
1339 	  .arg = { [0] = STRARRAY(how, sighow), }, },
1340 	{ .name	    = "rt_sigqueueinfo",
1341 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1342 	{ .name	    = "rt_tgsigqueueinfo",
1343 	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1344 	{ .name	    = "sched_setscheduler",
1345 	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
1346 	{ .name	    = "seccomp",
1347 	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
1348 		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
1349 	{ .name	    = "select", .timeout = true, },
1350 	{ .name	    = "sendfile", .alias = "sendfile64", },
1351 	{ .name	    = "sendmmsg",
1352 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1353 	{ .name	    = "sendmsg",
1354 	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1355 	{ .name	    = "sendto",
1356 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
1357 		   [4] = SCA_SOCKADDR_FROM_USER(addr), }, },
1358 	{ .name	    = "set_robust_list",	    .errpid = true,
1359 	  .arg = { [0] = { .from_user = true /* head */, }, }, },
1360 	{ .name	    = "set_tid_address", .errpid = true, },
1361 	{ .name	    = "setitimer",
1362 	  .arg = { [0] = STRARRAY(which, itimers), }, },
1363 	{ .name	    = "setrlimit",
1364 	  .arg = { [0] = STRARRAY(resource, rlimit_resources),
1365 		   [1] = { .from_user = true /* rlim */, }, }, },
1366 	{ .name	    = "setsockopt",
1367 	  .arg = { [1] = STRARRAY(level, socket_level), }, },
1368 	{ .name	    = "socket",
1369 	  .arg = { [0] = STRARRAY(family, socket_families),
1370 		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1371 		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1372 	{ .name	    = "socketpair",
1373 	  .arg = { [0] = STRARRAY(family, socket_families),
1374 		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1375 		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1376 	{ .name	    = "stat", .alias = "newstat", },
1377 	{ .name	    = "statx",
1378 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
1379 		   [2] = { .scnprintf = SCA_FS_AT_FLAGS, /* flags */ } ,
1380 		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
1381 	{ .name	    = "swapoff",
1382 	  .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
1383 	{ .name	    = "swapon",
1384 	  .arg = { [0] = SCA_FILENAME_FROM_USER(specialfile), }, },
1385 	{ .name	    = "symlinkat",
1386 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1387 	{ .name	    = "sync_file_range",
1388 	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
1389 	{ .name	    = "tgkill",
1390 	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1391 	{ .name	    = "tkill",
1392 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1393 	{ .name     = "umount2", .alias = "umount",
1394 	  .arg = { [0] = SCA_FILENAME_FROM_USER(name), }, },
1395 	{ .name	    = "uname", .alias = "newuname", },
1396 	{ .name	    = "unlinkat",
1397 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
1398 		   [1] = SCA_FILENAME_FROM_USER(pathname),
1399 		   [2] = { .scnprintf = SCA_FS_AT_FLAGS,  /* flags */ }, }, },
1400 	{ .name	    = "utimensat",
1401 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
1402 	{ .name	    = "wait4",	    .errpid = true,
1403 	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1404 	{ .name	    = "waitid",	    .errpid = true,
1405 	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1406 	{ .name	    = "write",
1407 	  .arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, },
1408 };
1409 
1410 static int syscall_fmt__cmp(const void *name, const void *fmtp)
1411 {
1412 	const struct syscall_fmt *fmt = fmtp;
1413 	return strcmp(name, fmt->name);
1414 }
1415 
1416 static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts,
1417 						     const int nmemb,
1418 						     const char *name)
1419 {
1420 	return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
1421 }
1422 
1423 static const struct syscall_fmt *syscall_fmt__find(const char *name)
1424 {
1425 	const int nmemb = ARRAY_SIZE(syscall_fmts);
1426 	return __syscall_fmt__find(syscall_fmts, nmemb, name);
1427 }
1428 
1429 static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts,
1430 							      const int nmemb, const char *alias)
1431 {
1432 	int i;
1433 
1434 	for (i = 0; i < nmemb; ++i) {
1435 		if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
1436 			return &fmts[i];
1437 	}
1438 
1439 	return NULL;
1440 }
1441 
1442 static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
1443 {
1444 	const int nmemb = ARRAY_SIZE(syscall_fmts);
1445 	return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
1446 }
1447 
1448 /*
1449  * is_exit: is this "exit" or "exit_group"?
1450  * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
1451  * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
1452  * nonexistent: Just a hole in the syscall table, syscall id not allocated
1453  */
1454 struct syscall {
1455 	struct tep_event    *tp_format;
1456 	int		    nr_args;
1457 	int		    args_size;
1458 	struct {
1459 		struct bpf_program *sys_enter,
1460 				   *sys_exit;
1461 	}		    bpf_prog;
1462 	bool		    is_exit;
1463 	bool		    is_open;
1464 	bool		    nonexistent;
1465 	bool		    use_btf;
1466 	struct tep_format_field *args;
1467 	const char	    *name;
1468 	const struct syscall_fmt  *fmt;
1469 	struct syscall_arg_fmt *arg_fmt;
1470 };
1471 
1472 /*
1473  * We need to have this 'calculated' boolean because in some cases we really
1474  * don't know the duration of a syscall, for instance, when we start a
1475  * session and some threads are already waiting for a syscall to finish, say
1476  * 'poll', in which case all we can do is print "( ? )" for the duration and
1477  * for the start timestamp.
1478  */
1479 static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1480 {
1481 	double duration = (double)t / NSEC_PER_MSEC;
1482 	size_t printed = fprintf(fp, "(");
1483 
1484 	if (!calculated)
1485 		printed += fprintf(fp, "         ");
1486 	else if (duration >= 1.0)
1487 		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1488 	else if (duration >= 0.01)
1489 		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1490 	else
1491 		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1492 	return printed + fprintf(fp, "): ");
1493 }
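/*
 * e.g. a 1.234ms syscall shows up as "( 1.234 ms): " in red, while one whose
 * duration couldn't be calculated gets only blank padding inside the
 * parentheses.
 */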
1494 
1495 /**
1496  * filename.ptr: The filename char pointer that will be vfs_getname'd
1497  * filename.entry_str_pos: Where to insert the string translated from
1498  *                         filename.ptr by the vfs_getname tracepoint/kprobe.
1499  * ret_scnprintf: syscall args may set this to a different syscall return
1500  *                formatter, for instance, fcntl may return fds, file flags, etc.
1501  */
1502 struct thread_trace {
1503 	u64		  entry_time;
1504 	bool		  entry_pending;
1505 	unsigned long	  nr_events;
1506 	unsigned long	  pfmaj, pfmin;
1507 	char		  *entry_str;
1508 	double		  runtime_ms;
1509 	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1510         struct {
1511 		unsigned long ptr;
1512 		short int     entry_str_pos;
1513 		bool	      pending_open;
1514 		unsigned int  namelen;
1515 		char	      *name;
1516 	} filename;
1517 	struct {
1518 		int	      max;
1519 		struct file   *table;
1520 	} files;
1521 
1522 	struct intlist *syscall_stats;
1523 };
1524 
1525 static struct thread_trace *thread_trace__new(void)
1526 {
1527 	struct thread_trace *ttrace =  zalloc(sizeof(struct thread_trace));
1528 
1529 	if (ttrace) {
1530 		ttrace->files.max = -1;
1531 		ttrace->syscall_stats = intlist__new(NULL);
1532 	}
1533 
1534 	return ttrace;
1535 }
1536 
1537 static void thread_trace__free_files(struct thread_trace *ttrace);
1538 
1539 static void thread_trace__delete(void *pttrace)
1540 {
1541 	struct thread_trace *ttrace = pttrace;
1542 
1543 	if (!ttrace)
1544 		return;
1545 
1546 	intlist__delete(ttrace->syscall_stats);
1547 	ttrace->syscall_stats = NULL;
1548 	thread_trace__free_files(ttrace);
1549 	zfree(&ttrace->entry_str);
1550 	free(ttrace);
1551 }
1552 
1553 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1554 {
1555 	struct thread_trace *ttrace;
1556 
1557 	if (thread == NULL)
1558 		goto fail;
1559 
1560 	if (thread__priv(thread) == NULL)
1561 		thread__set_priv(thread, thread_trace__new());
1562 
1563 	if (thread__priv(thread) == NULL)
1564 		goto fail;
1565 
1566 	ttrace = thread__priv(thread);
1567 	++ttrace->nr_events;
1568 
1569 	return ttrace;
1570 fail:
1571 	color_fprintf(fp, PERF_COLOR_RED,
1572 		      "WARNING: not enough memory, dropping samples!\n");
1573 	return NULL;
1574 }
1575 
1576 
1577 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1578 				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1579 {
1580 	struct thread_trace *ttrace = thread__priv(arg->thread);
1581 
1582 	ttrace->ret_scnprintf = ret_scnprintf;
1583 }
1584 
1585 #define TRACE_PFMAJ		(1 << 0)
1586 #define TRACE_PFMIN		(1 << 1)
1587 
1588 static const size_t trace__entry_str_size = 2048;
1589 
1590 static void thread_trace__free_files(struct thread_trace *ttrace)
1591 {
1592 	for (int i = 0; i < ttrace->files.max; ++i) {
1593 		struct file *file = ttrace->files.table + i;
1594 		zfree(&file->pathname);
1595 	}
1596 
1597 	zfree(&ttrace->files.table);
1598 	ttrace->files.max  = -1;
1599 }
1600 
1601 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
1602 {
1603 	if (fd < 0)
1604 		return NULL;
1605 
1606 	if (fd > ttrace->files.max) {
1607 		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
1608 
1609 		if (nfiles == NULL)
1610 			return NULL;
1611 
1612 		if (ttrace->files.max != -1) {
1613 			memset(nfiles + ttrace->files.max + 1, 0,
1614 			       (fd - ttrace->files.max) * sizeof(struct file));
1615 		} else {
1616 			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
1617 		}
1618 
1619 		ttrace->files.table = nfiles;
1620 		ttrace->files.max   = fd;
1621 	}
1622 
1623 	return ttrace->files.table + fd;
1624 }
1625 
1626 struct file *thread__files_entry(struct thread *thread, int fd)
1627 {
1628 	return thread_trace__files_entry(thread__priv(thread), fd);
1629 }
1630 
1631 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1632 {
1633 	struct thread_trace *ttrace = thread__priv(thread);
1634 	struct file *file = thread_trace__files_entry(ttrace, fd);
1635 
1636 	if (file != NULL) {
1637 		struct stat st;
1638 		if (stat(pathname, &st) == 0)
1639 			file->dev_maj = major(st.st_rdev);
1640 		file->pathname = strdup(pathname);
1641 		if (file->pathname)
1642 			return 0;
1643 	}
1644 
1645 	return -1;
1646 }
1647 
1648 static int thread__read_fd_path(struct thread *thread, int fd)
1649 {
1650 	char linkname[PATH_MAX], pathname[PATH_MAX];
1651 	struct stat st;
1652 	int ret;
1653 
1654 	if (thread__pid(thread) == thread__tid(thread)) {
1655 		scnprintf(linkname, sizeof(linkname),
1656 			  "/proc/%d/fd/%d", thread__pid(thread), fd);
1657 	} else {
1658 		scnprintf(linkname, sizeof(linkname),
1659 			  "/proc/%d/task/%d/fd/%d",
1660 			  thread__pid(thread), thread__tid(thread), fd);
1661 	}
1662 
1663 	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1664 		return -1;
1665 
1666 	ret = readlink(linkname, pathname, sizeof(pathname));
1667 
1668 	if (ret < 0 || ret > st.st_size)
1669 		return -1;
1670 
1671 	pathname[ret] = '\0';
1672 	return trace__set_fd_pathname(thread, fd, pathname);
1673 }
1674 
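/*
 * Return the cached pathname for 'fd', reading it from /proc on demand, but
 * only in live mode, where /proc still reflects the traced events.
 */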
1675 static const char *thread__fd_path(struct thread *thread, int fd,
1676 				   struct trace *trace)
1677 {
1678 	struct thread_trace *ttrace = thread__priv(thread);
1679 
1680 	if (ttrace == NULL || trace->fd_path_disabled)
1681 		return NULL;
1682 
1683 	if (fd < 0)
1684 		return NULL;
1685 
1686 	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
1687 		if (!trace->live)
1688 			return NULL;
1689 		++trace->stats.proc_getname;
1690 		if (thread__read_fd_path(thread, fd))
1691 			return NULL;
1692 	}
1693 
1694 	return ttrace->files.table[fd].pathname;
1695 }
1696 
1697 size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1698 {
1699 	int fd = arg->val;
1700 	size_t printed = scnprintf(bf, size, "%d", fd);
1701 	const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1702 
1703 	if (path)
1704 		printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1705 
1706 	return printed;
1707 }
1708 
1709 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1710 {
1711 	size_t printed = scnprintf(bf, size, "%d", fd);
1712 	struct thread *thread = machine__find_thread(trace->host, pid, pid);
1713 
1714 	if (thread) {
1715 		const char *path = thread__fd_path(thread, fd, trace);
1716 
1717 		if (path)
1718 			printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1719 
1720 		thread__put(thread);
1721 	}
1722 
1723 	return printed;
1724 }
1725 
1726 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1727 					      struct syscall_arg *arg)
1728 {
1729 	int fd = arg->val;
1730 	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1731 	struct thread_trace *ttrace = thread__priv(arg->thread);
1732 
1733 	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1734 		zfree(&ttrace->files.table[fd].pathname);
1735 
1736 	return printed;
1737 }
1738 
1739 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1740 				     unsigned long ptr)
1741 {
1742 	struct thread_trace *ttrace = thread__priv(thread);
1743 
1744 	ttrace->filename.ptr = ptr;
1745 	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1746 }
1747 
1748 static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1749 {
1750 	struct augmented_arg *augmented_arg = arg->augmented.args;
1751 	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1752 	/*
1753 	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1754 	 * we would have two strings, each prefixed by its size.
1755 	 */
1756 	int consumed = sizeof(*augmented_arg) + augmented_arg->size;
1757 
1758 	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1759 	arg->augmented.size -= consumed;
1760 
1761 	return printed;
1762 }
1763 
1764 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1765 					      struct syscall_arg *arg)
1766 {
1767 	unsigned long ptr = arg->val;
1768 
1769 	if (arg->augmented.args)
1770 		return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1771 
1772 	if (!arg->trace->vfs_getname)
1773 		return scnprintf(bf, size, "%#x", ptr);
1774 
1775 	thread__set_filename_pos(arg->thread, bf, ptr);
1776 	return 0;
1777 }
1778 
1779 #define MAX_CONTROL_CHAR 31
1780 #define MAX_ASCII 127
1781 
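/*
 * Print an augmented buffer argument, escaping control (0-31, 127) and
 * non-ASCII bytes as \<decimal>, then advance past the consumed payload so
 * the next argument with an augmentation can find its data.
 */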
1782 static size_t syscall_arg__scnprintf_buf(char *bf, size_t size, struct syscall_arg *arg)
1783 {
1784 	struct augmented_arg *augmented_arg = arg->augmented.args;
1785 	size_t printed = 0;
1786 	int consumed;
1787 
1788 	if (augmented_arg == NULL)
1789 		return 0;
1790 
1791 	unsigned char *orig = (unsigned char *)augmented_arg->value;
1792 	for (int j = 0; j < augmented_arg->size; ++j) {
1793 		bool control_char = orig[j] <= MAX_CONTROL_CHAR || orig[j] >= MAX_ASCII;
1794 		/* print control characters (0~31 and 127), and non-ascii characters in \(digits) */
1795 		printed += scnprintf(bf + printed, size - printed, control_char ? "\\%d" : "%c", (int)orig[j]);
1796 	}
1797 
1798 	consumed = sizeof(*augmented_arg) + augmented_arg->size;
1799 	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1800 	arg->augmented.size -= consumed;
1801 
1802 	return printed;
1803 }
1804 
1805 static bool trace__filter_duration(struct trace *trace, double t)
1806 {
1807 	return t < (trace->duration_filter * NSEC_PER_MSEC);
1808 }
1809 
1810 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1811 {
1812 	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1813 
1814 	return fprintf(fp, "%10.3f ", ts);
1815 }
1816 
1817 /*
1818  * We handle tstamp=0 as an undefined tstamp, i.e. like when we use
1819  * ttrace->entry_time for a thread that receives a sys_exit without first
1820  * having received a sys_enter ("poll" issued before the tracing session
1821  * starts, or a sys_enter lost due to ring buffer overflow).
1822  */
1823 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1824 {
1825 	if (tstamp > 0)
1826 		return __trace__fprintf_tstamp(trace, tstamp, fp);
1827 
1828 	return fprintf(fp, "         ? ");
1829 }
1830 
1831 static pid_t workload_pid = -1;
1832 static volatile sig_atomic_t done = false;
1833 static volatile sig_atomic_t interrupted = false;
1834 
1835 static void sighandler_interrupt(int sig __maybe_unused)
1836 {
1837 	done = interrupted = true;
1838 }
1839 
1840 static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
1841 			    void *context __maybe_unused)
1842 {
1843 	if (info->si_pid == workload_pid)
1844 		done = true;
1845 }
1846 
1847 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1848 {
1849 	size_t printed = 0;
1850 
1851 	if (trace->multiple_threads) {
1852 		if (trace->show_comm)
1853 			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1854 		printed += fprintf(fp, "%d ", thread__tid(thread));
1855 	}
1856 
1857 	return printed;
1858 }
1859 
1860 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1861 					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1862 {
1863 	size_t printed = 0;
1864 
1865 	if (trace->show_tstamp)
1866 		printed = trace__fprintf_tstamp(trace, tstamp, fp);
1867 	if (trace->show_duration)
1868 		printed += fprintf_duration(duration, duration_calculated, fp);
1869 	return printed + trace__fprintf_comm_tid(trace, thread, fp);
1870 }
1871 
1872 static int trace__process_event(struct trace *trace, struct machine *machine,
1873 				union perf_event *event, struct perf_sample *sample)
1874 {
1875 	int ret = 0;
1876 
1877 	switch (event->header.type) {
1878 	case PERF_RECORD_LOST:
1879 		color_fprintf(trace->output, PERF_COLOR_RED,
1880 			      "LOST %" PRIu64 " events!\n", (u64)event->lost.lost);
1881 		ret = machine__process_lost_event(machine, event, sample);
1882 		break;
1883 	default:
1884 		ret = machine__process_event(machine, event, sample);
1885 		break;
1886 	}
1887 
1888 	return ret;
1889 }
1890 
1891 static int trace__tool_process(const struct perf_tool *tool,
1892 			       union perf_event *event,
1893 			       struct perf_sample *sample,
1894 			       struct machine *machine)
1895 {
1896 	struct trace *trace = container_of(tool, struct trace, tool);
1897 	return trace__process_event(trace, machine, event, sample);
1898 }
1899 
1900 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1901 {
1902 	struct machine *machine = vmachine;
1903 
1904 	if (machine->kptr_restrict_warned)
1905 		return NULL;
1906 
1907 	if (symbol_conf.kptr_restrict) {
1908 		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1909 			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1910 			   "Kernel samples will not be resolved.\n");
1911 		machine->kptr_restrict_warned = true;
1912 		return NULL;
1913 	}
1914 
1915 	return machine__resolve_kernel_addr(vmachine, addrp, modp);
1916 }
1917 
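/*
 * Set up symbol resolution and the host machine, then synthesize events for
 * the pre-existing threads so their maps and symbols can be resolved.
 */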
1918 static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1919 {
1920 	int err = symbol__init(NULL);
1921 
1922 	if (err)
1923 		return err;
1924 
1925 	trace->host = machine__new_host();
1926 	if (trace->host == NULL)
1927 		return -ENOMEM;
1928 
1929 	thread__set_priv_destructor(thread_trace__delete);
1930 
1931 	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1932 	if (err < 0)
1933 		goto out;
1934 
1935 	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1936 					    evlist->core.threads, trace__tool_process,
1937 					    true, false, 1);
1938 out:
1939 	if (err)
1940 		symbol__exit();
1941 
1942 	return err;
1943 }
1944 
1945 static void trace__symbols__exit(struct trace *trace)
1946 {
1947 	machine__exit(trace->host);
1948 	trace->host = NULL;
1949 
1950 	symbol__exit();
1951 }
1952 
1953 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1954 {
1955 	int idx;
1956 
1957 	if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
1958 		nr_args = sc->fmt->nr_args;
1959 
1960 	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1961 	if (sc->arg_fmt == NULL)
1962 		return -1;
1963 
1964 	for (idx = 0; idx < nr_args; ++idx) {
1965 		if (sc->fmt)
1966 			sc->arg_fmt[idx] = sc->fmt->arg[idx];
1967 	}
1968 
1969 	sc->nr_args = nr_args;
1970 	return 0;
1971 }
1972 
1973 static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
1974 	{ .name = "msr",	.scnprintf = SCA_X86_MSR,	  .strtoul = STUL_X86_MSR,	   },
1975 	{ .name = "vector",	.scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
1976 };
1977 
1978 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
1979 {
1980 	const struct syscall_arg_fmt *fmt = fmtp;
1981 	return strcmp(name, fmt->name);
1982 }
1983 
1984 static const struct syscall_arg_fmt *
1985 __syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb,
1986 				const char *name)
1987 {
1988 	return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
1989 }
1990 
1991 static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
1992 {
1993 	const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
1994 	return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
1995 }
1996 
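/*
 * Pick a default formatter for each tracepoint field that doesn't have one
 * yet, using type/name heuristics (filenames, fds, pids, modes, enums via
 * BTF, etc.). Returns the last field so the caller can compute the total
 * size of the argument payload.
 */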
1997 static struct tep_format_field *
1998 syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field,
1999 			    bool *use_btf)
2000 {
2001 	struct tep_format_field *last_field = NULL;
2002 	int len;
2003 
2004 	for (; field; field = field->next, ++arg) {
2005 		last_field = field;
2006 
2007 		if (arg->scnprintf)
2008 			continue;
2009 
2010 		len = strlen(field->name);
2011 
2012 		// As far as heuristics (or intention) goes this seems to hold true, and makes sense!
2013 		if ((field->flags & TEP_FIELD_IS_POINTER) && strstarts(field->type, "const "))
2014 			arg->from_user = true;
2015 
2016 		if (strcmp(field->type, "const char *") == 0 &&
2017 		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
2018 		     strstr(field->name, "path") != NULL)) {
2019 			arg->scnprintf = SCA_FILENAME;
2020 		} else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
2021 			arg->scnprintf = SCA_PTR;
2022 		else if (strcmp(field->type, "pid_t") == 0)
2023 			arg->scnprintf = SCA_PID;
2024 		else if (strcmp(field->type, "umode_t") == 0)
2025 			arg->scnprintf = SCA_MODE_T;
2026 		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
2027 			arg->scnprintf = SCA_CHAR_ARRAY;
2028 			arg->nr_entries = field->arraylen;
2029 		} else if ((strcmp(field->type, "int") == 0 ||
2030 			  strcmp(field->type, "unsigned int") == 0 ||
2031 			  strcmp(field->type, "long") == 0) &&
2032 			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
2033 			/*
2034 			 * /sys/kernel/tracing/events/syscalls/sys_enter*
2035 			 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
2036 			 * 65 int
2037 			 * 23 unsigned int
2038 			 * 7 unsigned long
2039 			 */
2040 			arg->scnprintf = SCA_FD;
2041 		} else if (strstr(field->type, "enum") && use_btf != NULL) {
2042 			*use_btf = true;
2043 			arg->strtoul = STUL_BTF_TYPE;
2044 		} else {
2045 			const struct syscall_arg_fmt *fmt =
2046 				syscall_arg_fmt__find_by_name(field->name);
2047 
2048 			if (fmt) {
2049 				arg->scnprintf = fmt->scnprintf;
2050 				arg->strtoul   = fmt->strtoul;
2051 			}
2052 		}
2053 	}
2054 
2055 	return last_field;
2056 }
2057 
2058 static int syscall__set_arg_fmts(struct syscall *sc)
2059 {
2060 	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args,
2061 									  &sc->use_btf);
2062 
2063 	if (last_field)
2064 		sc->args_size = last_field->offset + last_field->size;
2065 
2066 	return 0;
2067 }
2068 
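/*
 * Lazily fill the syscall table entry for 'id': look up its name, its
 * syscalls:sys_enter_<name> tracepoint format and set up the per-argument
 * formatters.
 */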
2069 static int trace__read_syscall_info(struct trace *trace, int id)
2070 {
2071 	char tp_name[128];
2072 	struct syscall *sc;
2073 	const char *name = syscalltbl__name(trace->sctbl, id);
2074 	int err;
2075 
2076 	if (trace->syscalls.table == NULL) {
2077 		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
2078 		if (trace->syscalls.table == NULL)
2079 			return -ENOMEM;
2080 	}
2081 	sc = trace->syscalls.table + id;
2082 	if (sc->nonexistent)
2083 		return -EEXIST;
2084 
2085 	if (name == NULL) {
2086 		sc->nonexistent = true;
2087 		return -EEXIST;
2088 	}
2089 
2090 	sc->name = name;
2091 	sc->fmt  = syscall_fmt__find(sc->name);
2092 
2093 	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
2094 	sc->tp_format = trace_event__tp_format("syscalls", tp_name);
2095 
2096 	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
2097 		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
2098 		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
2099 	}
2100 
2101 	/*
2102 	 * If we fail to read the tracepoint format via the sysfs node, the
2103 	 * tracepoint doesn't exist, so set the 'nonexistent' flag to true.
2104 	 */
2105 	if (IS_ERR(sc->tp_format)) {
2106 		sc->nonexistent = true;
2107 		return PTR_ERR(sc->tp_format);
2108 	}
2109 
2110 	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
2111 					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
2112 		return -ENOMEM;
2113 
2114 	sc->args = sc->tp_format->format.fields;
2115 	/*
2116 	 * The first field, '__syscall_nr' or 'nr', carries the syscall number
2117 	 * and is not needed here, so check for it and drop it. Note that older
2118 	 * kernels don't have this field at all.
2119 	 */
2120 	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
2121 		sc->args = sc->args->next;
2122 		--sc->nr_args;
2123 	}
2124 
2125 	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
2126 	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
2127 
2128 	err = syscall__set_arg_fmts(sc);
2129 
2130 	/* after calling syscall__set_arg_fmts() we'll know whether use_btf is true */
2131 	if (sc->use_btf)
2132 		trace__load_vmlinux_btf(trace);
2133 
2134 	return err;
2135 }
2136 
2137 static int evsel__init_tp_arg_scnprintf(struct evsel *evsel, bool *use_btf)
2138 {
2139 	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
2140 
2141 	if (fmt != NULL) {
2142 		const struct tep_event *tp_format = evsel__tp_format(evsel);
2143 
2144 		if (tp_format) {
2145 			syscall_arg_fmt__init_array(fmt, tp_format->format.fields, use_btf);
2146 			return 0;
2147 		}
2148 	}
2149 
2150 	return -ENOMEM;
2151 }
2152 
2153 static int intcmp(const void *a, const void *b)
2154 {
2155 	const int *one = a, *another = b;
2156 
2157 	return *one - *another;
2158 }
2159 
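/*
 * Turn the event qualifier list (syscall names, globs allowed) into a sorted
 * array of syscall ids, skipping names that don't resolve on this system.
 */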
2160 static int trace__validate_ev_qualifier(struct trace *trace)
2161 {
2162 	int err = 0;
2163 	bool printed_invalid_prefix = false;
2164 	struct str_node *pos;
2165 	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
2166 
2167 	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
2168 						 sizeof(trace->ev_qualifier_ids.entries[0]));
2169 
2170 	if (trace->ev_qualifier_ids.entries == NULL) {
2171 		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
2172 		       trace->output);
2173 		err = -EINVAL;
2174 		goto out;
2175 	}
2176 
2177 	strlist__for_each_entry(pos, trace->ev_qualifier) {
2178 		const char *sc = pos->s;
2179 		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
2180 
2181 		if (id < 0) {
2182 			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
2183 			if (id >= 0)
2184 				goto matches;
2185 
2186 			if (!printed_invalid_prefix) {
2187 				pr_debug("Skipping unknown syscalls: ");
2188 				printed_invalid_prefix = true;
2189 			} else {
2190 				pr_debug(", ");
2191 			}
2192 
2193 			pr_debug("%s", sc);
2194 			continue;
2195 		}
2196 matches:
2197 		trace->ev_qualifier_ids.entries[nr_used++] = id;
2198 		if (match_next == -1)
2199 			continue;
2200 
2201 		while (1) {
2202 			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
2203 			if (id < 0)
2204 				break;
2205 			if (nr_allocated == nr_used) {
2206 				void *entries;
2207 
2208 				nr_allocated += 8;
2209 				entries = realloc(trace->ev_qualifier_ids.entries,
2210 						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
2211 				if (entries == NULL) {
2212 					err = -ENOMEM;
2213 					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
2214 					goto out_free;
2215 				}
2216 				trace->ev_qualifier_ids.entries = entries;
2217 			}
2218 			trace->ev_qualifier_ids.entries[nr_used++] = id;
2219 		}
2220 	}
2221 
2222 	trace->ev_qualifier_ids.nr = nr_used;
2223 	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
2224 out:
2225 	if (printed_invalid_prefix)
2226 		pr_debug("\n");
2227 	return err;
2228 out_free:
2229 	zfree(&trace->ev_qualifier_ids.entries);
2230 	trace->ev_qualifier_ids.nr = 0;
2231 	goto out;
2232 }
2233 
2234 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
2235 {
2236 	bool in_ev_qualifier;
2237 
2238 	if (trace->ev_qualifier_ids.nr == 0)
2239 		return true;
2240 
2241 	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
2242 				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
2243 
2244 	if (in_ev_qualifier)
2245 		return !trace->not_ev_qualifier;
2246 
2247 	return trace->not_ev_qualifier;
2248 }
2249 
2250 /*
2251  * args is to be interpreted as a series of longs but we need to handle
2252  * 8-byte unaligned accesses. args points to raw_data within the event
2253  * and raw_data is guaranteed to be 8-byte unaligned because it is
2254  * preceded by raw_size which is a u32. So we need to copy args to a temp
2255  * variable to read it. Most notably this avoids extended load instructions
2256  * on unaligned addresses
2257  */
2258 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
2259 {
2260 	unsigned long val;
2261 	unsigned char *p = arg->args + sizeof(unsigned long) * idx;
2262 
2263 	memcpy(&val, p, sizeof(val));
2264 	return val;
2265 }
2266 
2267 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
2268 				      struct syscall_arg *arg)
2269 {
2270 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
2271 		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
2272 
2273 	return scnprintf(bf, size, "arg%d: ", arg->idx);
2274 }
2275 
2276 /*
2277  * Check if the value is in fact zero, i.e. mask whatever needs masking, such
2278  * as mount 'flags' argument that needs ignoring some magic flag, see comment
2279  * in tools/perf/trace/beauty/mount_flags.c
2280  */
2281 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
2282 {
2283 	if (fmt && fmt->mask_val)
2284 		return fmt->mask_val(arg, val);
2285 
2286 	return val;
2287 }
2288 
2289 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
2290 					     struct syscall_arg *arg, unsigned long val)
2291 {
2292 	if (fmt && fmt->scnprintf) {
2293 		arg->val = val;
2294 		if (fmt->parm)
2295 			arg->parm = fmt->parm;
2296 		return fmt->scnprintf(bf, size, arg);
2297 	}
2298 	return scnprintf(bf, size, "%ld", val);
2299 }
2300 
2301 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
2302 				      unsigned char *args, void *augmented_args, int augmented_args_size,
2303 				      struct trace *trace, struct thread *thread)
2304 {
2305 	size_t printed = 0, btf_printed;
2306 	unsigned long val;
2307 	u8 bit = 1;
2308 	struct syscall_arg arg = {
2309 		.args	= args,
2310 		.augmented = {
2311 			.size = augmented_args_size,
2312 			.args = augmented_args,
2313 		},
2314 		.idx	= 0,
2315 		.mask	= 0,
2316 		.trace  = trace,
2317 		.thread = thread,
2318 		.show_string_prefix = trace->show_string_prefix,
2319 	};
2320 	struct thread_trace *ttrace = thread__priv(thread);
2321 	void *default_scnprintf;
2322 
2323 	/*
2324 	 * Things like fcntl will set this in its 'cmd' formatter to pick the
2325 	 * right formatter for the return value (an fd? file flags?), which is
2326 	 * not needed for syscalls that always return a given type, say an fd.
2327 	 */
2328 	ttrace->ret_scnprintf = NULL;
2329 
2330 	if (sc->args != NULL) {
2331 		struct tep_format_field *field;
2332 
2333 		for (field = sc->args; field;
2334 		     field = field->next, ++arg.idx, bit <<= 1) {
2335 			if (arg.mask & bit)
2336 				continue;
2337 
2338 			arg.fmt = &sc->arg_fmt[arg.idx];
2339 			val = syscall_arg__val(&arg, arg.idx);
2340 			/*
2341 			 * Some syscall args need some mask, most don't and
2342 			 * return val untouched.
2343 			 */
2344 			val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
2345 
2346 			/*
2347 			 * Suppress this argument if its value is zero and show_zero
2348 			 * property isn't set.
2349 			 *
2350 			 * If it has a BTF type, then override the zero suppression knob
2351 			 * as the common case is for zero in an enum to have an associated entry.
2352 			 */
2353 			if (val == 0 && !trace->show_zeros &&
2354 			    !(sc->arg_fmt && sc->arg_fmt[arg.idx].show_zero) &&
2355 			    !(sc->arg_fmt && sc->arg_fmt[arg.idx].strtoul == STUL_BTF_TYPE))
2356 				continue;
2357 
2358 			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2359 
2360 			if (trace->show_arg_names)
2361 				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2362 
2363 			default_scnprintf = sc->arg_fmt[arg.idx].scnprintf;
2364 
2365 			if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) {
2366 				btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed,
2367 								   size - printed, val, field->type);
2368 				if (btf_printed) {
2369 					printed += btf_printed;
2370 					continue;
2371 				}
2372 			}
2373 
2374 			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
2375 								  bf + printed, size - printed, &arg, val);
2376 		}
2377 	} else if (IS_ERR(sc->tp_format)) {
2378 		/*
2379 		 * If we managed to read the tracepoint /format file, then we
2380 		 * may end up not having any args, like with gettid(), so only
2381 		 * print the raw args when we didn't manage to read it.
2382 		 */
2383 		while (arg.idx < sc->nr_args) {
2384 			if (arg.mask & bit)
2385 				goto next_arg;
2386 			val = syscall_arg__val(&arg, arg.idx);
2387 			if (printed)
2388 				printed += scnprintf(bf + printed, size - printed, ", ");
2389 			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
2390 			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
2391 next_arg:
2392 			++arg.idx;
2393 			bit <<= 1;
2394 		}
2395 	}
2396 
2397 	return printed;
2398 }
2399 
2400 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2401 				  union perf_event *event,
2402 				  struct perf_sample *sample);
2403 
2404 static struct syscall *trace__syscall_info(struct trace *trace,
2405 					   struct evsel *evsel, int id)
2406 {
2407 	int err = 0;
2408 
2409 	if (id < 0) {
2410 
2411 		/*
2412 		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
2413 		 * before that, leaving at a higher verbosity level till that is
2414 		 * explained. Reproduced with plain ftrace with:
2415 		 *
2416 		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
2417 		 * grep "NR -1 " /t/trace_pipe
2418 		 *
2419 		 * After generating some load on the machine.
2420  		 */
2421 		if (verbose > 1) {
2422 			static u64 n;
2423 			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2424 				id, evsel__name(evsel), ++n);
2425 		}
2426 		return NULL;
2427 	}
2428 
2429 	err = -EINVAL;
2430 
2431 	if (id > trace->sctbl->syscalls.max_id) {
2432 		goto out_cant_read;
2433 	}
2434 
2435 	if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2436 	    (err = trace__read_syscall_info(trace, id)) != 0)
2437 		goto out_cant_read;
2438 
2439 	if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2440 		goto out_cant_read;
2441 
2442 	return &trace->syscalls.table[id];
2443 
2444 out_cant_read:
2445 	if (verbose > 0) {
2446 		char sbuf[STRERR_BUFSIZE];
2447 		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2448 		if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2449 			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2450 		fputs(" information\n", trace->output);
2451 	}
2452 	return NULL;
2453 }
2454 
2455 struct syscall_stats {
2456 	struct stats stats;
2457 	u64	     nr_failures;
2458 	int	     max_errno;
2459 	u32	     *errnos;
2460 };
2461 
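/*
 * Update the per-thread, per-syscall duration stats and, when the errno
 * summary is enabled, the histogram of errno values returned by failed calls.
 */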
2462 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
2463 				 int id, struct perf_sample *sample, long err, bool errno_summary)
2464 {
2465 	struct int_node *inode;
2466 	struct syscall_stats *stats;
2467 	u64 duration = 0;
2468 
2469 	inode = intlist__findnew(ttrace->syscall_stats, id);
2470 	if (inode == NULL)
2471 		return;
2472 
2473 	stats = inode->priv;
2474 	if (stats == NULL) {
2475 		stats = zalloc(sizeof(*stats));
2476 		if (stats == NULL)
2477 			return;
2478 
2479 		init_stats(&stats->stats);
2480 		inode->priv = stats;
2481 	}
2482 
2483 	if (ttrace->entry_time && sample->time > ttrace->entry_time)
2484 		duration = sample->time - ttrace->entry_time;
2485 
2486 	update_stats(&stats->stats, duration);
2487 
2488 	if (err < 0) {
2489 		++stats->nr_failures;
2490 
2491 		if (!errno_summary)
2492 			return;
2493 
2494 		err = -err;
2495 		if (err > stats->max_errno) {
2496 			u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
2497 
2498 			if (new_errnos) {
2499 				memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
2500 			} else {
2501 				pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
2502 					 thread__comm_str(thread), thread__pid(thread),
2503 					 thread__tid(thread));
2504 				return;
2505 			}
2506 
2507 			stats->errnos = new_errnos;
2508 			stats->max_errno = err;
2509 		}
2510 
2511 		++stats->errnos[err - 1];
2512 	}
2513 }
2514 
2515 static int trace__printf_interrupted_entry(struct trace *trace)
2516 {
2517 	struct thread_trace *ttrace;
2518 	size_t printed;
2519 	int len;
2520 
2521 	if (trace->failure_only || trace->current == NULL)
2522 		return 0;
2523 
2524 	ttrace = thread__priv(trace->current);
2525 
2526 	if (!ttrace->entry_pending)
2527 		return 0;
2528 
2529 	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2530 	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2531 
2532 	if (len < trace->args_alignment - 4)
2533 		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2534 
2535 	printed += fprintf(trace->output, " ...\n");
2536 
2537 	ttrace->entry_pending = false;
2538 	++trace->nr_events_printed;
2539 
2540 	return printed;
2541 }
2542 
2543 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2544 				 struct perf_sample *sample, struct thread *thread)
2545 {
2546 	int printed = 0;
2547 
2548 	if (trace->print_sample) {
2549 		double ts = (double)sample->time / NSEC_PER_MSEC;
2550 
2551 		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2552 				   evsel__name(evsel), ts,
2553 				   thread__comm_str(thread),
2554 				   sample->pid, sample->tid, sample->cpu);
2555 	}
2556 
2557 	return printed;
2558 }
2559 
2560 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
2561 {
2562 	/*
2563 	 * For now, with BPF raw_augmented, we hook into raw_syscalls:sys_enter
2564 	 * and there we get all 6 syscall args plus the tracepoint common fields
2565 	 * that get calculated at the start and the syscall_nr (another long).
2566 	 * So we check if that is the case and, if so, don't look past
2567 	 * sc->args_size but always past the full raw_syscalls:sys_enter payload,
2568 	 * which is fixed.
2569 	 *
2570 	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
2571 	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
2572 	 * copies only what we need for each syscall, like what happens when we
2573 	 * use syscalls:sys_enter_NAME, reducing the kernel/userspace traffic
2574 	 * to just what is needed for each syscall.
2575 	 */
2576 	int args_size = raw_augmented_args_size ?: sc->args_size;
2577 
2578 	*augmented_args_size = sample->raw_size - args_size;
2579 	if (*augmented_args_size > 0) {
2580 		static uintptr_t argbuf[1024]; /* assuming single-threaded */
2581 
2582 		if ((size_t)(*augmented_args_size) > sizeof(argbuf))
2583 			return NULL;
2584 
2585 		/*
2586 		 * The perf ring-buffer is 8-byte aligned but sample->raw_data
2587 		 * is not because it's preceded by u32 size.  Later, beautifier
2588 		 * will use the augmented args with stricter alignments like in
2589 		 * some struct.  To make sure it's aligned, let's copy the args
2590 		 * into a static buffer as it's single-threaded for now.
2591 		 */
2592 		memcpy(argbuf, sample->raw_data + args_size, *augmented_args_size);
2593 
2594 		return argbuf;
2595 	}
2596 	return NULL;
2597 }
2598 
2599 static void syscall__exit(struct syscall *sc)
2600 {
2601 	if (!sc)
2602 		return;
2603 
2604 	zfree(&sc->arg_fmt);
2605 }
2606 
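/*
 * sys_enter handler: format the syscall name and arguments into
 * ttrace->entry_str and defer printing until the matching sys_exit arrives,
 * except for syscalls that don't return (exit, exit_group), which are
 * printed right away.
 */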
2607 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2608 			    union perf_event *event __maybe_unused,
2609 			    struct perf_sample *sample)
2610 {
2611 	char *msg;
2612 	void *args;
2613 	int printed = 0;
2614 	struct thread *thread;
2615 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2616 	int augmented_args_size = 0;
2617 	void *augmented_args = NULL;
2618 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2619 	struct thread_trace *ttrace;
2620 
2621 	if (sc == NULL)
2622 		return -1;
2623 
2624 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2625 	ttrace = thread__trace(thread, trace->output);
2626 	if (ttrace == NULL)
2627 		goto out_put;
2628 
2629 	trace__fprintf_sample(trace, evsel, sample, thread);
2630 
2631 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2632 
2633 	if (ttrace->entry_str == NULL) {
2634 		ttrace->entry_str = malloc(trace__entry_str_size);
2635 		if (!ttrace->entry_str)
2636 			goto out_put;
2637 	}
2638 
2639 	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2640 		trace__printf_interrupted_entry(trace);
2641 	/*
2642 	 * If this is raw_syscalls.sys_enter, then it always comes with all 6 possible
2643 	 * arguments, even if the syscall being handled, say "openat", uses only 4.
2644 	 * This breaks the syscall__augmented_args() check for augmented args, as we
2645 	 * calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs
2646 	 * format file. So, when handling, say, the openat syscall, we end up getting
2647 	 * 6 args for the raw_syscalls:sys_enter event when we expected just 4, and
2648 	 * mistakenly think that the extra 2 u64 args are the augmented filename. Just
2649 	 * check here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
2650 	 */
2651 	if (evsel != trace->syscalls.events.sys_enter)
2652 		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2653 	ttrace->entry_time = sample->time;
2654 	msg = ttrace->entry_str;
2655 	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
2656 
2657 	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
2658 					   args, augmented_args, augmented_args_size, trace, thread);
2659 
2660 	if (sc->is_exit) {
2661 		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2662 			int alignment = 0;
2663 
2664 			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2665 			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2666 			if (trace->args_alignment > printed)
2667 				alignment = trace->args_alignment - printed;
2668 			fprintf(trace->output, "%*s= ?\n", alignment, " ");
2669 		}
2670 	} else {
2671 		ttrace->entry_pending = true;
2672 		/* See trace__vfs_getname & trace__sys_exit */
2673 		ttrace->filename.pending_open = false;
2674 	}
2675 
2676 	if (trace->current != thread) {
2677 		thread__put(trace->current);
2678 		trace->current = thread__get(thread);
2679 	}
2680 	err = 0;
2681 out_put:
2682 	thread__put(thread);
2683 	return err;
2684 }
2685 
2686 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2687 				    struct perf_sample *sample)
2688 {
2689 	struct thread_trace *ttrace;
2690 	struct thread *thread;
2691 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2692 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2693 	char msg[1024];
2694 	void *args, *augmented_args = NULL;
2695 	int augmented_args_size;
2696 	size_t printed = 0;
2697 
2698 	if (sc == NULL)
2699 		return -1;
2700 
2701 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2702 	ttrace = thread__trace(thread, trace->output);
2703 	/*
2704 	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2705 	 * and the rest of the beautifiers access it via struct syscall_arg.
2706 	 */
2707 	if (ttrace == NULL)
2708 		goto out_put;
2709 
2710 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2711 	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2712 	printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2713 	fprintf(trace->output, "%.*s", (int)printed, msg);
2714 	err = 0;
2715 out_put:
2716 	thread__put(thread);
2717 	return err;
2718 }
2719 
2720 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2721 				    struct perf_sample *sample,
2722 				    struct callchain_cursor *cursor)
2723 {
2724 	struct addr_location al;
2725 	int max_stack = evsel->core.attr.sample_max_stack ?
2726 			evsel->core.attr.sample_max_stack :
2727 			trace->max_stack;
2728 	int err = -1;
2729 
2730 	addr_location__init(&al);
2731 	if (machine__resolve(trace->host, &al, sample) < 0)
2732 		goto out;
2733 
2734 	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2735 out:
2736 	addr_location__exit(&al);
2737 	return err;
2738 }
2739 
2740 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2741 {
2742 	/* TODO: user-configurable print_opts */
2743 	const unsigned int print_opts = EVSEL__PRINT_SYM |
2744 				        EVSEL__PRINT_DSO |
2745 				        EVSEL__PRINT_UNKNOWN_AS_ADDR;
2746 
2747 	return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
2748 }
2749 
2750 static const char *errno_to_name(struct evsel *evsel, int err)
2751 {
2752 	struct perf_env *env = evsel__env(evsel);
2753 
2754 	return perf_env__arch_strerrno(env, err);
2755 }
2756 
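/*
 * sys_exit handler: compute the syscall duration, update the summary stats
 * and print the queued entry string together with the beautified return
 * value.
 */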
2757 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2758 			   union perf_event *event __maybe_unused,
2759 			   struct perf_sample *sample)
2760 {
2761 	long ret;
2762 	u64 duration = 0;
2763 	bool duration_calculated = false;
2764 	struct thread *thread;
2765 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2766 	int alignment = trace->args_alignment;
2767 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2768 	struct thread_trace *ttrace;
2769 
2770 	if (sc == NULL)
2771 		return -1;
2772 
2773 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2774 	ttrace = thread__trace(thread, trace->output);
2775 	if (ttrace == NULL)
2776 		goto out_put;
2777 
2778 	trace__fprintf_sample(trace, evsel, sample, thread);
2779 
2780 	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2781 
2782 	if (trace->summary)
2783 		thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2784 
2785 	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2786 		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2787 		ttrace->filename.pending_open = false;
2788 		++trace->stats.vfs_getname;
2789 	}
2790 
2791 	if (ttrace->entry_time) {
2792 		duration = sample->time - ttrace->entry_time;
2793 		if (trace__filter_duration(trace, duration))
2794 			goto out;
2795 		duration_calculated = true;
2796 	} else if (trace->duration_filter)
2797 		goto out;
2798 
2799 	if (sample->callchain) {
2800 		struct callchain_cursor *cursor = get_tls_callchain_cursor();
2801 
2802 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
2803 		if (callchain_ret == 0) {
2804 			if (cursor->nr < trace->min_stack)
2805 				goto out;
2806 			callchain_ret = 1;
2807 		}
2808 	}
2809 
2810 	if (trace->summary_only || (ret >= 0 && trace->failure_only))
2811 		goto out;
2812 
2813 	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2814 
2815 	if (ttrace->entry_pending) {
2816 		printed = fprintf(trace->output, "%s", ttrace->entry_str);
2817 	} else {
2818 		printed += fprintf(trace->output, " ... [");
2819 		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2820 		printed += 9;
2821 		printed += fprintf(trace->output, "]: %s()", sc->name);
2822 	}
2823 
2824 	printed++; /* the closing ')' */
2825 
2826 	if (alignment > printed)
2827 		alignment -= printed;
2828 	else
2829 		alignment = 0;
2830 
2831 	fprintf(trace->output, ")%*s= ", alignment, " ");
2832 
2833 	if (sc->fmt == NULL) {
2834 		if (ret < 0)
2835 			goto errno_print;
2836 signed_print:
2837 		fprintf(trace->output, "%ld", ret);
2838 	} else if (ret < 0) {
2839 errno_print: {
2840 		char bf[STRERR_BUFSIZE];
2841 		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2842 			   *e = errno_to_name(evsel, -ret);
2843 
2844 		fprintf(trace->output, "-1 %s (%s)", e, emsg);
2845 	}
2846 	} else if (ret == 0 && sc->fmt->timeout)
2847 		fprintf(trace->output, "0 (Timeout)");
2848 	else if (ttrace->ret_scnprintf) {
2849 		char bf[1024];
2850 		struct syscall_arg arg = {
2851 			.val	= ret,
2852 			.thread	= thread,
2853 			.trace	= trace,
2854 		};
2855 		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2856 		ttrace->ret_scnprintf = NULL;
2857 		fprintf(trace->output, "%s", bf);
2858 	} else if (sc->fmt->hexret)
2859 		fprintf(trace->output, "%#lx", ret);
2860 	else if (sc->fmt->errpid) {
2861 		struct thread *child = machine__find_thread(trace->host, ret, ret);
2862 
2863 		if (child != NULL) {
2864 			fprintf(trace->output, "%ld", ret);
2865 			if (thread__comm_set(child))
2866 				fprintf(trace->output, " (%s)", thread__comm_str(child));
2867 			thread__put(child);
2868 		}
2869 	} else
2870 		goto signed_print;
2871 
2872 	fputc('\n', trace->output);
2873 
2874 	/*
2875 	 * For the sake of --max-events, we only count as an 'event' a non-filtered
2876 	 * sys_enter + sys_exit pair and other tracepoint events.
2877 	 */
2878 	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2879 		interrupted = true;
2880 
2881 	if (callchain_ret > 0)
2882 		trace__fprintf_callchain(trace, sample);
2883 	else if (callchain_ret < 0)
2884 		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2885 out:
2886 	ttrace->entry_pending = false;
2887 	err = 0;
2888 out_put:
2889 	thread__put(thread);
2890 	return err;
2891 }
2892 
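/*
 * probe:vfs_getname handler: take the pathname resolved by the kernel and
 * splice it into the pending entry string at the position recorded by
 * thread__set_filename_pos().
 */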
2893 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2894 			      union perf_event *event __maybe_unused,
2895 			      struct perf_sample *sample)
2896 {
2897 	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2898 	struct thread_trace *ttrace;
2899 	size_t filename_len, entry_str_len, to_move;
2900 	ssize_t remaining_space;
2901 	char *pos;
2902 	const char *filename = evsel__rawptr(evsel, sample, "pathname");
2903 
2904 	if (!thread)
2905 		goto out;
2906 
2907 	ttrace = thread__priv(thread);
2908 	if (!ttrace)
2909 		goto out_put;
2910 
2911 	filename_len = strlen(filename);
2912 	if (filename_len == 0)
2913 		goto out_put;
2914 
2915 	if (ttrace->filename.namelen < filename_len) {
2916 		char *f = realloc(ttrace->filename.name, filename_len + 1);
2917 
2918 		if (f == NULL)
2919 			goto out_put;
2920 
2921 		ttrace->filename.namelen = filename_len;
2922 		ttrace->filename.name = f;
2923 	}
2924 
2925 	strcpy(ttrace->filename.name, filename);
2926 	ttrace->filename.pending_open = true;
2927 
2928 	if (!ttrace->filename.ptr)
2929 		goto out_put;
2930 
2931 	entry_str_len = strlen(ttrace->entry_str);
2932 	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2933 	if (remaining_space <= 0)
2934 		goto out_put;
2935 
2936 	if (filename_len > (size_t)remaining_space) {
2937 		filename += filename_len - remaining_space;
2938 		filename_len = remaining_space;
2939 	}
2940 
2941 	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2942 	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2943 	memmove(pos + filename_len, pos, to_move);
2944 	memcpy(pos, filename, filename_len);
2945 
2946 	ttrace->filename.ptr = 0;
2947 	ttrace->filename.entry_str_pos = 0;
2948 out_put:
2949 	thread__put(thread);
2950 out:
2951 	return 0;
2952 }
2953 
2954 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2955 				     union perf_event *event __maybe_unused,
2956 				     struct perf_sample *sample)
2957 {
2958 	u64 runtime = evsel__intval(evsel, sample, "runtime");
2959 	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2960 	struct thread *thread = machine__findnew_thread(trace->host,
2961 							sample->pid,
2962 							sample->tid);
2963 	struct thread_trace *ttrace = thread__trace(thread, trace->output);
2964 
2965 	if (ttrace == NULL)
2966 		goto out_dump;
2967 
2968 	ttrace->runtime_ms += runtime_ms;
2969 	trace->runtime_ms += runtime_ms;
2970 out_put:
2971 	thread__put(thread);
2972 	return 0;
2973 
2974 out_dump:
2975 	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
2976 	       evsel->name,
2977 	       evsel__strval(evsel, sample, "comm"),
2978 	       (pid_t)evsel__intval(evsel, sample, "pid"),
2979 	       runtime,
2980 	       evsel__intval(evsel, sample, "vruntime"));
2981 	goto out_put;
2982 }
2983 
2984 static int bpf_output__printer(enum binary_printer_ops op,
2985 			       unsigned int val, void *extra __maybe_unused, FILE *fp)
2986 {
2987 	unsigned char ch = (unsigned char)val;
2988 
2989 	switch (op) {
2990 	case BINARY_PRINT_CHAR_DATA:
2991 		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2992 	case BINARY_PRINT_DATA_BEGIN:
2993 	case BINARY_PRINT_LINE_BEGIN:
2994 	case BINARY_PRINT_ADDR:
2995 	case BINARY_PRINT_NUM_DATA:
2996 	case BINARY_PRINT_NUM_PAD:
2997 	case BINARY_PRINT_SEP:
2998 	case BINARY_PRINT_CHAR_PAD:
2999 	case BINARY_PRINT_LINE_END:
3000 	case BINARY_PRINT_DATA_END:
3001 	default:
3002 		break;
3003 	}
3004 
3005 	return 0;
3006 }
3007 
3008 static void bpf_output__fprintf(struct trace *trace,
3009 				struct perf_sample *sample)
3010 {
3011 	binary__fprintf(sample->raw_data, sample->raw_size, 8,
3012 			bpf_output__printer, NULL, trace->output);
3013 	++trace->nr_events_printed;
3014 }
3015 
3016 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
3017 				       struct thread *thread, void *augmented_args, int augmented_args_size)
3018 {
3019 	char bf[2048];
3020 	size_t size = sizeof(bf);
3021 	const struct tep_event *tp_format = evsel__tp_format(evsel);
3022 	struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
3023 	struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
3024 	size_t printed = 0, btf_printed;
3025 	unsigned long val;
3026 	u8 bit = 1;
3027 	struct syscall_arg syscall_arg = {
3028 		.augmented = {
3029 			.size = augmented_args_size,
3030 			.args = augmented_args,
3031 		},
3032 		.idx	= 0,
3033 		.mask	= 0,
3034 		.trace  = trace,
3035 		.thread = thread,
3036 		.show_string_prefix = trace->show_string_prefix,
3037 	};
3038 
3039 	for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
3040 		if (syscall_arg.mask & bit)
3041 			continue;
3042 
3043 		syscall_arg.len = 0;
3044 		syscall_arg.fmt = arg;
3045 		if (field->flags & TEP_FIELD_IS_ARRAY) {
3046 			int offset = field->offset;
3047 
3048 			if (field->flags & TEP_FIELD_IS_DYNAMIC) {
3049 				offset = format_field__intval(field, sample, evsel->needs_swap);
3050 				syscall_arg.len = offset >> 16;
3051 				offset &= 0xffff;
3052 				if (tep_field_is_relative(field->flags))
3053 					offset += field->offset + field->size;
3054 			}
3055 
3056 			val = (uintptr_t)(sample->raw_data + offset);
3057 		} else
3058 			val = format_field__intval(field, sample, evsel->needs_swap);
3059 		/*
3060 		 * Some syscall args need some mask, most don't and
3061 		 * return val untouched.
3062 		 */
3063 		val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
3064 
3065 		/* Suppress this argument if its value is zero and show_zero property isn't set. */
3066 		if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE)
3067 			continue;
3068 
3069 		printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
3070 
3071 		if (trace->show_arg_names)
3072 			printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
3073 
3074 		btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->type);
3075 		if (btf_printed) {
3076 			printed += btf_printed;
3077 			continue;
3078 		}
3079 
3080 		printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
3081 	}
3082 
3083 	return printed + fprintf(trace->output, "%.*s", (int)printed, bf);
3084 }
3085 
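/*
 * Handler for the other (non raw_syscalls) tracepoints and bpf-output
 * events: print a timestamp/comm/tid header followed by the event's fields.
 */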
3086 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
3087 				union perf_event *event __maybe_unused,
3088 				struct perf_sample *sample)
3089 {
3090 	struct thread *thread;
3091 	int callchain_ret = 0;
3092 
3093 	if (evsel->nr_events_printed >= evsel->max_events)
3094 		return 0;
3095 
3096 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3097 
3098 	if (sample->callchain) {
3099 		struct callchain_cursor *cursor = get_tls_callchain_cursor();
3100 
3101 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
3102 		if (callchain_ret == 0) {
3103 			if (cursor->nr < trace->min_stack)
3104 				goto out;
3105 			callchain_ret = 1;
3106 		}
3107 	}
3108 
3109 	trace__printf_interrupted_entry(trace);
3110 	trace__fprintf_tstamp(trace, sample->time, trace->output);
3111 
3112 	if (trace->trace_syscalls && trace->show_duration)
3113 		fprintf(trace->output, "(         ): ");
3114 
3115 	if (thread)
3116 		trace__fprintf_comm_tid(trace, thread, trace->output);
3117 
3118 	if (evsel == trace->syscalls.events.bpf_output) {
3119 		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
3120 		struct syscall *sc = trace__syscall_info(trace, evsel, id);
3121 
3122 		if (sc) {
3123 			fprintf(trace->output, "%s(", sc->name);
3124 			trace__fprintf_sys_enter(trace, evsel, sample);
3125 			fputc(')', trace->output);
3126 			goto newline;
3127 		}
3128 
3129 		/*
3130 		 * XXX: Not having the associated syscall info or not finding/adding
3131 		 * 	the thread should never happen, but if it does...
3132 		 * 	fall thru and print it as a bpf_output event.
3133 		 */
3134 	}
3135 
3136 	fprintf(trace->output, "%s(", evsel->name);
3137 
3138 	if (evsel__is_bpf_output(evsel)) {
3139 		bpf_output__fprintf(trace, sample);
3140 	} else {
3141 		const struct tep_event *tp_format = evsel__tp_format(evsel);
3142 
3143 		if (tp_format && (strncmp(tp_format->name, "sys_enter_", 10) ||
3144 				  trace__fprintf_sys_enter(trace, evsel, sample))) {
3145 			if (trace->libtraceevent_print) {
3146 				event_format__fprintf(tp_format, sample->cpu,
3147 						      sample->raw_data, sample->raw_size,
3148 						      trace->output);
3149 			} else {
3150 				trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
3151 			}
3152 		}
3153 	}
3154 
3155 newline:
3156 	fprintf(trace->output, ")\n");
3157 
3158 	if (callchain_ret > 0)
3159 		trace__fprintf_callchain(trace, sample);
3160 	else if (callchain_ret < 0)
3161 		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
3162 
3163 	++trace->nr_events_printed;
3164 
3165 	if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
3166 		evsel__disable(evsel);
3167 		evsel__close(evsel);
3168 	}
3169 out:
3170 	thread__put(thread);
3171 	return 0;
3172 }
3173 
3174 static void print_location(FILE *f, struct perf_sample *sample,
3175 			   struct addr_location *al,
3176 			   bool print_dso, bool print_sym)
3177 {
3178 
3179 	if ((verbose > 0 || print_dso) && al->map)
3180 		fprintf(f, "%s@", dso__long_name(map__dso(al->map)));
3181 
3182 	if ((verbose > 0 || print_sym) && al->sym)
3183 		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
3184 			al->addr - al->sym->start);
3185 	else if (al->map)
3186 		fprintf(f, "0x%" PRIx64, al->addr);
3187 	else
3188 		fprintf(f, "0x%" PRIx64, sample->addr);
3189 }
3190 
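/*
 * Page fault software event handler: bump the per-thread major/minor fault
 * counters and print the faulting IP and target address with symbol info.
 */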
3191 static int trace__pgfault(struct trace *trace,
3192 			  struct evsel *evsel,
3193 			  union perf_event *event __maybe_unused,
3194 			  struct perf_sample *sample)
3195 {
3196 	struct thread *thread;
3197 	struct addr_location al;
3198 	char map_type = 'd';
3199 	struct thread_trace *ttrace;
3200 	int err = -1;
3201 	int callchain_ret = 0;
3202 
3203 	addr_location__init(&al);
3204 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3205 
3206 	if (sample->callchain) {
3207 		struct callchain_cursor *cursor = get_tls_callchain_cursor();
3208 
3209 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
3210 		if (callchain_ret == 0) {
3211 			if (cursor->nr < trace->min_stack)
3212 				goto out_put;
3213 			callchain_ret = 1;
3214 		}
3215 	}
3216 
3217 	ttrace = thread__trace(thread, trace->output);
3218 	if (ttrace == NULL)
3219 		goto out_put;
3220 
3221 	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
3222 		ttrace->pfmaj++;
3223 	else
3224 		ttrace->pfmin++;
3225 
3226 	if (trace->summary_only)
3227 		goto out;
3228 
3229 	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
3230 
3231 	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
3232 
3233 	fprintf(trace->output, "%sfault [",
3234 		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
3235 		"maj" : "min");
3236 
3237 	print_location(trace->output, sample, &al, false, true);
3238 
3239 	fprintf(trace->output, "] => ");
3240 
3241 	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
3242 
3243 	if (!al.map) {
3244 		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
3245 
3246 		if (al.map)
3247 			map_type = 'x';
3248 		else
3249 			map_type = '?';
3250 	}
3251 
3252 	print_location(trace->output, sample, &al, true, false);
3253 
3254 	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
3255 
3256 	if (callchain_ret > 0)
3257 		trace__fprintf_callchain(trace, sample);
3258 	else if (callchain_ret < 0)
3259 		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
3260 
3261 	++trace->nr_events_printed;
3262 out:
3263 	err = 0;
3264 out_put:
3265 	thread__put(thread);
3266 	addr_location__exit(&al);
3267 	return err;
3268 }
3269 
3270 static void trace__set_base_time(struct trace *trace,
3271 				 struct evsel *evsel,
3272 				 struct perf_sample *sample)
3273 {
3274 	/*
3275 	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
3276 	 * and don't use sample->time unconditionally, we may end up having
3277 	 * some other event in the future without PERF_SAMPLE_TIME for good
3278 	 * reason, i.e. we may not be interested in its timestamps, just in
3279 	 * it taking place, picking some piece of information when it
3280 	 * appears in our event stream (vfs_getname comes to mind).
3281 	 */
3282 	if (trace->base_time == 0 && !trace->full_time &&
3283 	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
3284 		trace->base_time = sample->time;
3285 }
3286 
3287 static int trace__process_sample(const struct perf_tool *tool,
3288 				 union perf_event *event,
3289 				 struct perf_sample *sample,
3290 				 struct evsel *evsel,
3291 				 struct machine *machine __maybe_unused)
3292 {
3293 	struct trace *trace = container_of(tool, struct trace, tool);
3294 	struct thread *thread;
3295 	int err = 0;
3296 
3297 	tracepoint_handler handler = evsel->handler;
3298 
3299 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3300 	if (thread && thread__is_filtered(thread))
3301 		goto out;
3302 
3303 	trace__set_base_time(trace, evsel, sample);
3304 
3305 	if (handler) {
3306 		++trace->nr_events;
3307 		handler(trace, evsel, event, sample);
3308 	}
3309 out:
3310 	thread__put(thread);
3311 	return err;
3312 }
3313 
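/*
 * 'perf trace record': build a 'perf record' command line with the
 * raw_syscalls (or syscalls) tracepoints, the optional page fault events and
 * a filter excluding our own pid, then hand it over to cmd_record().
 */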
3314 static int trace__record(struct trace *trace, int argc, const char **argv)
3315 {
3316 	unsigned int rec_argc, i, j;
3317 	const char **rec_argv;
3318 	const char * const record_args[] = {
3319 		"record",
3320 		"-R",
3321 		"-m", "1024",
3322 		"-c", "1",
3323 	};
3324 	pid_t pid = getpid();
3325 	char *filter = asprintf__tp_filter_pids(1, &pid);
3326 	const char * const sc_args[] = { "-e", };
3327 	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
3328 	const char * const majpf_args[] = { "-e", "major-faults" };
3329 	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
3330 	const char * const minpf_args[] = { "-e", "minor-faults" };
3331 	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
3332 	int err = -1;
3333 
3334 	/* +3 is for the event string below and the pid filter */
3335 	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
3336 		majpf_args_nr + minpf_args_nr + argc;
3337 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
3338 
3339 	if (rec_argv == NULL || filter == NULL)
3340 		goto out_free;
3341 
3342 	j = 0;
3343 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
3344 		rec_argv[j++] = record_args[i];
3345 
3346 	if (trace->trace_syscalls) {
3347 		for (i = 0; i < sc_args_nr; i++)
3348 			rec_argv[j++] = sc_args[i];
3349 
3350 		/* event string may be different for older kernels - e.g., RHEL6 */
3351 		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
3352 			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
3353 		else if (is_valid_tracepoint("syscalls:sys_enter"))
3354 			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
3355 		else {
3356 			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
3357 			goto out_free;
3358 		}
3359 	}
3360 
3361 	rec_argv[j++] = "--filter";
3362 	rec_argv[j++] = filter;
3363 
3364 	if (trace->trace_pgfaults & TRACE_PFMAJ)
3365 		for (i = 0; i < majpf_args_nr; i++)
3366 			rec_argv[j++] = majpf_args[i];
3367 
3368 	if (trace->trace_pgfaults & TRACE_PFMIN)
3369 		for (i = 0; i < minpf_args_nr; i++)
3370 			rec_argv[j++] = minpf_args[i];
3371 
3372 	for (i = 0; i < (unsigned int)argc; i++)
3373 		rec_argv[j++] = argv[i];
3374 
3375 	err = cmd_record(j, rec_argv);
3376 out_free:
3377 	free(filter);
3378 	free(rec_argv);
3379 	return err;
3380 }
3381 
3382 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3383 
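/*
 * Try to add the "probe:vfs_getname" probe event(s): only the variants that
 * have a "pathname" field are kept and get the vfs_getname handler, the
 * others are removed from the evlist.
 */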
3384 static bool evlist__add_vfs_getname(struct evlist *evlist)
3385 {
3386 	bool found = false;
3387 	struct evsel *evsel, *tmp;
3388 	struct parse_events_error err;
3389 	int ret;
3390 
3391 	parse_events_error__init(&err);
3392 	ret = parse_events(evlist, "probe:vfs_getname*", &err);
3393 	parse_events_error__exit(&err);
3394 	if (ret)
3395 		return false;
3396 
3397 	evlist__for_each_entry_safe(evlist, evsel, tmp) {
3398 		if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
3399 			continue;
3400 
3401 		if (evsel__field(evsel, "pathname")) {
3402 			evsel->handler = trace__vfs_getname;
3403 			found = true;
3404 			continue;
3405 		}
3406 
3407 		list_del_init(&evsel->core.node);
3408 		evsel->evlist = NULL;
3409 		evsel__delete(evsel);
3410 	}
3411 
3412 	return found;
3413 }
3414 
3415 static struct evsel *evsel__new_pgfault(u64 config)
3416 {
3417 	struct evsel *evsel;
3418 	struct perf_event_attr attr = {
3419 		.type = PERF_TYPE_SOFTWARE,
3420 		.mmap_data = 1,
3421 	};
3422 
3423 	attr.config = config;
3424 	attr.sample_period = 1;
3425 
3426 	event_attr_init(&attr);
3427 
3428 	evsel = evsel__new(&attr);
3429 	if (evsel)
3430 		evsel->handler = trace__pgfault;
3431 
3432 	return evsel;
3433 }
3434 
3435 static void evlist__free_syscall_tp_fields(struct evlist *evlist)
3436 {
3437 	struct evsel *evsel;
3438 
3439 	evlist__for_each_entry(evlist, evsel) {
3440 		evsel_trace__delete(evsel->priv);
3441 		evsel->priv = NULL;
3442 	}
3443 }
3444 
3445 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3446 {
3447 	const u32 type = event->header.type;
3448 	struct evsel *evsel;
3449 
3450 	if (type != PERF_RECORD_SAMPLE) {
3451 		trace__process_event(trace, trace->host, event, sample);
3452 		return;
3453 	}
3454 
3455 	evsel = evlist__id2evsel(trace->evlist, sample->id);
3456 	if (evsel == NULL) {
3457 		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3458 		return;
3459 	}
3460 
3461 	if (evswitch__discard(&trace->evswitch, evsel))
3462 		return;
3463 
3464 	trace__set_base_time(trace, evsel, sample);
3465 
3466 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
3467 	    sample->raw_data == NULL) {
3468 		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3469 		       evsel__name(evsel), sample->tid,
3470 		       sample->cpu, sample->raw_size);
3471 	} else {
3472 		tracepoint_handler handler = evsel->handler;
3473 		handler(trace, evsel, event, sample);
3474 	}
3475 
3476 	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3477 		interrupted = true;
3478 }
3479 
3480 static int trace__add_syscall_newtp(struct trace *trace)
3481 {
3482 	int ret = -1;
3483 	struct evlist *evlist = trace->evlist;
3484 	struct evsel *sys_enter, *sys_exit;
3485 
3486 	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
3487 	if (sys_enter == NULL)
3488 		goto out;
3489 
3490 	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
3491 		goto out_delete_sys_enter;
3492 
3493 	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
3494 	if (sys_exit == NULL)
3495 		goto out_delete_sys_enter;
3496 
3497 	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
3498 		goto out_delete_sys_exit;
3499 
3500 	evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3501 	evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3502 
3503 	evlist__add(evlist, sys_enter);
3504 	evlist__add(evlist, sys_exit);
3505 
3506 	if (callchain_param.enabled && !trace->kernel_syscallchains) {
3507 		/*
3508 		 * We're interested only in the user space callchain
3509 		 * leading to the syscall; allow overriding that for
3510 		 * debugging reasons using --kernel_syscall_callchains
3511 		 */
3512 		sys_exit->core.attr.exclude_callchain_kernel = 1;
3513 	}
3514 
3515 	trace->syscalls.events.sys_enter = sys_enter;
3516 	trace->syscalls.events.sys_exit  = sys_exit;
3517 
3518 	ret = 0;
3519 out:
3520 	return ret;
3521 
3522 out_delete_sys_exit:
3523 	evsel__delete_priv(sys_exit);
3524 out_delete_sys_enter:
3525 	evsel__delete_priv(sys_enter);
3526 	goto out;
3527 }
3528 
3529 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3530 {
3531 	int err = -1;
3532 	struct evsel *sys_exit;
3533 	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3534 						trace->ev_qualifier_ids.nr,
3535 						trace->ev_qualifier_ids.entries);
3536 
3537 	if (filter == NULL)
3538 		goto out_enomem;
3539 
3540 	if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3541 		sys_exit = trace->syscalls.events.sys_exit;
3542 		err = evsel__append_tp_filter(sys_exit, filter);
3543 	}
3544 
3545 	free(filter);
3546 out:
3547 	return err;
3548 out_enomem:
3549 	errno = ENOMEM;
3550 	goto out;
3551 }
3552 
3553 #ifdef HAVE_BPF_SKEL
3554 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
3555 {
3556        int id;
3557 	int id;
3558 
3559 	if (arg_fmt->type != NULL)
3560 		return -1;
3561 
3562 	id = btf__find_by_name(btf, type);
3563 	if (id < 0)
3564 		return -1;
3565 
3566 	arg_fmt->type    = btf__type_by_id(btf, id);
3567 	arg_fmt->type_id = id;
3568 
3569 	return 0;
3570 
3571 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3572 {
3573 	struct bpf_program *pos, *prog = NULL;
3574 	const char *sec_name;
3575 
3576 	if (trace->skel->obj == NULL)
3577 		return NULL;
3578 
3579 	bpf_object__for_each_program(pos, trace->skel->obj) {
3580 		sec_name = bpf_program__section_name(pos);
3581 		if (sec_name && !strcmp(sec_name, name)) {
3582 			prog = pos;
3583 			break;
3584 		}
3585 	}
3586 
3587 	return prog;
3588 }
3589 
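/*
 * Find the augmenter BPF program for a syscall: use the explicit prog_name
 * when one is given, otherwise look for a "tp/syscalls/sys_<type>_<name>"
 * section (then one named after the syscall's alias), finally falling back
 * to the generic unaugmented program.
 */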
3590 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3591 							const char *prog_name, const char *type)
3592 {
3593 	struct bpf_program *prog;
3594 
3595 	if (prog_name == NULL) {
3596 		char default_prog_name[256];
3597 		scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
3598 		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3599 		if (prog != NULL)
3600 			goto out_found;
3601 		if (sc->fmt && sc->fmt->alias) {
3602 			scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
3603 			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3604 			if (prog != NULL)
3605 				goto out_found;
3606 		}
3607 		goto out_unaugmented;
3608 	}
3609 
3610 	prog = trace__find_bpf_program_by_title(trace, prog_name);
3611 
3612 	if (prog != NULL) {
3613 out_found:
3614 		return prog;
3615 	}
3616 
3617 	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
3618 		 prog_name, type, sc->name);
3619 out_unaugmented:
3620 	return trace->skel->progs.syscall_unaugmented;
3621 }
3622 
3623 static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3624 {
3625 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3626 
3627 	if (sc == NULL)
3628 		return;
3629 
3630 	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3631 	sc->bpf_prog.sys_exit  = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit  : NULL,  "exit");
3632 }
3633 
3634 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3635 {
3636 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3637 	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3638 }
3639 
3640 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3641 {
3642 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3643 	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3644 }
3645 
3646 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int key, unsigned int *beauty_array)
3647 {
3648 	struct tep_format_field *field;
3649 	struct syscall *sc = trace__syscall_info(trace, NULL, key);
3650 	const struct btf_type *bt;
3651 	char *struct_offset, *tmp, name[32];
3652 	bool can_augment = false;
3653 	int i, cnt;
3654 
3655 	if (sc == NULL)
3656 		return -1;
3657 
3658 	trace__load_vmlinux_btf(trace);
3659 	if (trace->btf == NULL)
3660 		return -1;
3661 
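	/*
	 * Encoding of beauty_array[arg] for the BPF collector:
	 *  > 1: size in bytes of the struct/union this pointer arg points to
	 *    1: the arg is a NUL terminated string (filename and friends)
	 *  < 0: -(index + 1) of the integer arg that holds this buffer's size
	 */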
3662 	for (i = 0, field = sc->args; field; ++i, field = field->next) {
3663 		// XXX We're only collecting pointer payloads _from_ user space
3664 		if (!sc->arg_fmt[i].from_user)
3665 			continue;
3666 
3667 		struct_offset = strstr(field->type, "struct ");
3668 		if (struct_offset == NULL)
3669 			struct_offset = strstr(field->type, "union ");
3670 		else
3671 			struct_offset++; // advance one extra byte: "struct " is one char longer than "union "
3672 
3673 		if (field->flags & TEP_FIELD_IS_POINTER && struct_offset) { /* struct or union (think BPF's attr arg) */
3674 			struct_offset += 6;
3675 
3676 			/* for 'struct foo *', we only want 'foo' */
3677 			for (tmp = struct_offset, cnt = 0; *tmp != ' ' && *tmp != '\0'; ++tmp, ++cnt) {
3678 			}
3679 
3680 			/* bounded copy, name[] is only 32 bytes */
3681 			scnprintf(name, sizeof(name), "%.*s", cnt, struct_offset);
3682 
3683 			/* cache struct's btf_type and type_id */
3684 			if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name))
3685 				continue;
3686 
3687 			bt = sc->arg_fmt[i].type;
3688 			beauty_array[i] = bt->size;
3689 			can_augment = true;
3690 		} else if (field->flags & TEP_FIELD_IS_POINTER && /* string */
3691 			   strcmp(field->type, "const char *") == 0 &&
3692 			   (strstr(field->name, "name") ||
3693 			    strstr(field->name, "path") ||
3694 			    strstr(field->name, "file") ||
3695 			    strstr(field->name, "root") ||
3696 			    strstr(field->name, "key") ||
3697 			    strstr(field->name, "special") ||
3698 			    strstr(field->name, "type") ||
3699 			    strstr(field->name, "description"))) {
3700 			beauty_array[i] = 1;
3701 			can_augment = true;
3702 		} else if (field->flags & TEP_FIELD_IS_POINTER && /* buffer */
3703 			   strstr(field->type, "char *") &&
3704 			   (strstr(field->name, "buf") ||
3705 			    strstr(field->name, "val") ||
3706 			    strstr(field->name, "msg"))) {
3707 			int j;
3708 			struct tep_format_field *field_tmp;
3709 
3710 			/* find the size of the buffer that appears in pairs with buf */
3711 			for (j = 0, field_tmp = sc->args; field_tmp; ++j, field_tmp = field_tmp->next) {
3712 				if (!(field_tmp->flags & TEP_FIELD_IS_POINTER) && /* only integers */
3713 				    (strstr(field_tmp->name, "count") ||
3714 				     strstr(field_tmp->name, "siz") ||  /* size, bufsiz */
3715 				     (strstr(field_tmp->name, "len") && strcmp(field_tmp->name, "filename")))) {
3716 					 /* filename's got 'len' in it, we don't want that */
3717 					beauty_array[i] = -(j + 1);
3718 					can_augment = true;
3719 					break;
3720 				}
3721 			}
3722 		}
3723 	}
3724 
3725 	if (can_augment)
3726 		return 0;
3727 
3728 	return -1;
3729 }
3730 
3731 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
3732 {
3733 	struct tep_format_field *field, *candidate_field;
3734 	/*
3735 	 * We're only interested in syscalls that have a pointer:
3736 	 */
3737 	for (field = sc->args; field; field = field->next) {
3738 		if (field->flags & TEP_FIELD_IS_POINTER)
3739 			goto try_to_find_pair;
3740 	}
3741 
3742 	return NULL;
3743 
3744 try_to_find_pair:
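	/*
	 * Scan the syscall table for another syscall whose sys_enter augmenter
	 * copies a compatible set of pointer args, so that its program can be
	 * reused for this syscall too.
	 */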
3745 	for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3746 		int id = syscalltbl__id_at_idx(trace->sctbl, i);
3747 		struct syscall *pair = trace__syscall_info(trace, NULL, id);
3748 		struct bpf_program *pair_prog;
3749 		bool is_candidate = false;
3750 
3751 		if (pair == NULL || pair == sc ||
3752 		    pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
3753 			continue;
3754 
3755 		for (field = sc->args, candidate_field = pair->args;
3756 		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
3757 			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
3758 			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
3759 
3760 			if (is_pointer) {
3761 			       if (!candidate_is_pointer) {
3762 					// The candidate just doesn't copy our pointer arg, it might copy other pointers we want.
3763 					continue;
3764 			       }
3765 			} else {
3766 				if (candidate_is_pointer) {
3767 					// The candidate might copy a pointer we don't have, skip it.
3768 					goto next_candidate;
3769 				}
3770 				continue;
3771 			}
3772 
3773 			if (strcmp(field->type, candidate_field->type))
3774 				goto next_candidate;
3775 
3776 			/*
3777 			 * This is limited in the BPF program, but sys_write
3778 			 * uses "const char *" for its "buf" arg, so we need
3779 			 * a heuristic that is somewhat future proof...
3780 			 */
3781 			if (strcmp(field->type, "const char *") == 0 &&
3782 			    !(strstr(field->name, "name") ||
3783 			      strstr(field->name, "path") ||
3784 			      strstr(field->name, "file") ||
3785 			      strstr(field->name, "root") ||
3786 			      strstr(field->name, "description")))
3787 				goto next_candidate;
3788 
3789 			is_candidate = true;
3790 		}
3791 
3792 		if (!is_candidate)
3793 			goto next_candidate;
3794 
3795 		/*
3796 		 * Check if the tentative pair syscall augmenter has more pointers, if it has,
3797 		 * then it may be collecting that and we then can't use it, as it would collect
3798 		 * more than what is common to the two syscalls.
3799 		 */
3800 		if (candidate_field) {
3801 			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
3802 				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
3803 					goto next_candidate;
3804 		}
3805 
3806 		pair_prog = pair->bpf_prog.sys_enter;
3807 		/*
3808 		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
3809 		 * have been searched for yet, so look it up here; if that returns
3810 		 * the unaugmented one, ignore it, otherwise we'll reuse that BPF
3811 		 * program, set up for a filtered syscall, on a non-filtered one.
3812 		 *
3813 		 * For instance, we may have "!syscalls:sys_enter_renameat" and
3814 		 * that augmenter is still useful for "renameat2".
3815 		 */
3816 		if (pair_prog == NULL) {
3817 			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3818 			if (pair_prog == trace->skel->progs.syscall_unaugmented)
3819 				goto next_candidate;
3820 		}
3821 
3822 		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3823 		return pair_prog;
3824 	next_candidate:
3825 		continue;
3826 	}
3827 
3828 	return NULL;
3829 }
3830 
3831 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3832 {
3833 	int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
3834 	int map_exit_fd  = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
3835 	int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter);
3836 	int err = 0;
3837 	unsigned int beauty_array[6];
3838 
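	/*
	 * First pass: for each enabled syscall, plug the sys_enter/sys_exit
	 * augmenters into the tail call prog array maps and fill the beauty
	 * map with how many bytes to collect for each pointer arg.
	 */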
3839 	for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3840 		int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i);
3841 
3842 		if (!trace__syscall_enabled(trace, key))
3843 			continue;
3844 
3845 		trace__init_syscall_bpf_progs(trace, key);
3846 
3847 		// It'll get at least the "!raw_syscalls:unaugmented" fallback program
3848 		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3849 		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3850 		if (err)
3851 			break;
3852 		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3853 		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
3854 		if (err)
3855 			break;
3856 
3857 		/* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */
3858 		memset(beauty_array, 0, sizeof(beauty_array));
3859 		err = trace__bpf_sys_enter_beauty_map(trace, key, (unsigned int *)beauty_array);
3860 		if (err)
3861 			continue;
3862 		err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY);
3863 		if (err)
3864 			break;
3865 	}
3866 
3867 	/*
3868 	 * Now let's do a second pass looking for enabled syscalls without
3869 	 * an augmenter that have a signature that is a superset of another
3870 	 * syscall with an augmenter so that we can auto-reuse it.
3871 	 *
3872 	 * I.e. if we have an augmenter for the "open" syscall that has
3873 	 * this signature:
3874 	 *
3875 	 *   int open(const char *pathname, int flags, mode_t mode);
3876 	 *
3877 	 * Since that augmenter collects just the first string argument, we
3878 	 * can reuse it for the 'creat' syscall, that has this signature:
3879 	 *
3880 	 *   int creat(const char *pathname, mode_t mode);
3881 	 *
3882 	 * and for:
3883 	 *
3884 	 *   int stat(const char *pathname, struct stat *statbuf);
3885 	 *   int lstat(const char *pathname, struct stat *statbuf);
3886 	 *
3887 	 * Because the 'open' augmenter will collect the first arg as a string,
3888 	 * and leave alone all the other args, which already helps with
3889 	 * beautifying 'stat' and 'lstat''s pathname arg.
3890 	 *
3891 	 * Then, in time, when 'stat' gets an augmenter that collects both
3892 	 * first and second args (this one on the raw_syscalls:sys_exit prog
3893 	 * array tail call), then that one will be used.
3894 	 */
3895 	for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3896 		int key = syscalltbl__id_at_idx(trace->sctbl, i);
3897 		struct syscall *sc = trace__syscall_info(trace, NULL, key);
3898 		struct bpf_program *pair_prog;
3899 		int prog_fd;
3900 
3901 		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3902 			continue;
3903 
3904 		/*
3905 		 * For now we're just reusing the sys_enter prog, and if it
3906 		 * already has an augmenter, we don't need to find one.
3907 		 */
3908 		if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
3909 			continue;
3910 
3911 		/*
3912 		 * Look at all the other syscalls for one that has a signature
3913 		 * that is close enough that we can share:
3914 		 */
3915 		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3916 		if (pair_prog == NULL)
3917 			continue;
3918 
3919 		sc->bpf_prog.sys_enter = pair_prog;
3920 
3921 		/*
3922 		 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
3923 		 * with the fd for the program we're reusing:
3924 		 */
3925 		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3926 		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3927 		if (err)
3928 			break;
3929 	}
3930 
3931 	return err;
3932 }
3933 #endif // HAVE_BPF_SKEL
3934 
3935 static int trace__set_ev_qualifier_filter(struct trace *trace)
3936 {
3937 	if (trace->syscalls.events.sys_enter)
3938 		return trace__set_ev_qualifier_tp_filter(trace);
3939 	return 0;
3940 }
3941 
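/*
 * Mark each of the given pids as present in the BPF filter map, so the BPF
 * side can skip events coming from them.
 */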
3942 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3943 				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3944 {
3945 	int err = 0;
3946 #ifdef HAVE_LIBBPF_SUPPORT
3947 	bool value = true;
3948 	int map_fd = bpf_map__fd(map);
3949 	size_t i;
3950 
3951 	for (i = 0; i < npids; ++i) {
3952 		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3953 		if (err)
3954 			break;
3955 	}
3956 #endif
3957 	return err;
3958 }
3959 
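/*
 * Filter out the pid of perf trace itself and, when the parent chain leads to
 * sshd or gnome-terminal, that pid too, to avoid the feedback loop of tracing
 * the output of this very tool.
 */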
3960 static int trace__set_filter_loop_pids(struct trace *trace)
3961 {
3962 	unsigned int nr = 1, err;
3963 	pid_t pids[32] = {
3964 		getpid(),
3965 	};
3966 	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3967 
3968 	while (thread && nr < ARRAY_SIZE(pids)) {
3969 		struct thread *parent = machine__find_thread(trace->host,
3970 							     thread__ppid(thread),
3971 							     thread__ppid(thread));
3972 
3973 		if (parent == NULL)
3974 			break;
3975 
3976 		if (!strcmp(thread__comm_str(parent), "sshd") ||
3977 		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
3978 			pids[nr++] = thread__tid(parent);
3979 			break;
3980 		}
3981 		thread = parent;
3982 	}
3983 
3984 	err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3985 	if (!err && trace->filter_pids.map)
3986 		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3987 
3988 	return err;
3989 }
3990 
3991 static int trace__set_filter_pids(struct trace *trace)
3992 {
3993 	int err = 0;
3994 	/*
3995 	 * Better not use !target__has_task() here because we need to cover the
3996 	 * case where no threads were specified in the command line, but a
3997 	 * workload was, and in that case we will fill in the thread_map when
3998 	 * we fork the workload in evlist__prepare_workload.
3999 	 */
4000 	if (trace->filter_pids.nr > 0) {
4001 		err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
4002 						    trace->filter_pids.entries);
4003 		if (!err && trace->filter_pids.map) {
4004 			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
4005 						       trace->filter_pids.entries);
4006 		}
4007 	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
4008 		err = trace__set_filter_loop_pids(trace);
4009 	}
4010 
4011 	return err;
4012 }
4013 
4014 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
4015 {
4016 	struct evlist *evlist = trace->evlist;
4017 	struct perf_sample sample;
4018 	int err = evlist__parse_sample(evlist, event, &sample);
4019 
4020 	if (err)
4021 		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
4022 	else
4023 		trace__handle_event(trace, event, &sample);
4024 
4025 	return 0;
4026 }
4027 
4028 static int __trace__flush_events(struct trace *trace)
4029 {
4030 	u64 first = ordered_events__first_time(&trace->oe.data);
4031 	u64 flush = trace->oe.last - NSEC_PER_SEC;
4032 
4033 	/* Is there something older than the one second reordering window to flush? */
4034 	if (first && first < flush)
4035 		return ordered_events__flush_time(&trace->oe.data, flush);
4036 
4037 	return 0;
4038 }
4039 
4040 static int trace__flush_events(struct trace *trace)
4041 {
4042 	return !trace->sort_events ? 0 : __trace__flush_events(trace);
4043 }
4044 
4045 static int trace__deliver_event(struct trace *trace, union perf_event *event)
4046 {
4047 	int err;
4048 
4049 	if (!trace->sort_events)
4050 		return __trace__deliver_event(trace, event);
4051 
4052 	err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
4053 	if (err && err != -1)
4054 		return err;
4055 
4056 	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
4057 	if (err)
4058 		return err;
4059 
4060 	return trace__flush_events(trace);
4061 }
4062 
4063 static int ordered_events__deliver_event(struct ordered_events *oe,
4064 					 struct ordered_event *event)
4065 {
4066 	struct trace *trace = container_of(oe, struct trace, oe.data);
4067 
4068 	return __trace__deliver_event(trace, event->event);
4069 }
4070 
4071 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg,
4072 								   char **type)
4073 {
4074 	struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
4075 	const struct tep_event *tp_format;
4076 
4077 	if (!fmt)
4078 		return NULL;
4079 
4080 	tp_format = evsel__tp_format(evsel);
4081 	if (!tp_format)
4082 		return NULL;
4083 
4084 	for (const struct tep_format_field *field = tp_format->format.fields; field;
4085 	     field = field->next, ++fmt) {
4086 		if (strcmp(field->name, arg) == 0) {
4087 			*type = field->type;
4088 			return fmt;
4089 		}
4090 	}
4091 
4092 	return NULL;
4093 }
4094 
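/*
 * Expand symbolic values in a tracepoint filter expression: for each
 * comparison whose right hand side is a name, use the matching syscall arg's
 * strtoul() resolver to replace that name with its numeric (hex) value before
 * the filter is set on the evsel.
 */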
4095 static int trace__expand_filter(struct trace *trace, struct evsel *evsel)
4096 {
4097 	char *tok, *left = evsel->filter, *new_filter = evsel->filter;
4098 
4099 	while ((tok = strpbrk(left, "=<>!")) != NULL) {
4100 		char *right = tok + 1, *right_end;
4101 
4102 		if (*right == '=')
4103 			++right;
4104 
4105 		while (isspace(*right))
4106 			++right;
4107 
4108 		if (*right == '\0')
4109 			break;
4110 
4111 		while (!isalpha(*left))
4112 			if (++left == tok) {
4113 				/*
4114 				 * Bail out: we can't find the name of the argument being used in
4115 				 * the filter, so let it try to set this filter as-is, it will fail later.
4116 				 */
4117 				return 0;
4118 			}
4119 
4120 		right_end = right + 1;
4121 		while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
4122 			++right_end;
4123 
4124 		if (isalpha(*right)) {
4125 			struct syscall_arg_fmt *fmt;
4126 			int left_size = tok - left,
4127 			    right_size = right_end - right;
4128 			char arg[128], *type;
4129 
4130 			while (isspace(left[left_size - 1]))
4131 				--left_size;
4132 
4133 			scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
4134 
4135 			fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg, &type);
4136 			if (fmt == NULL) {
4137 				pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
4138 				       arg, evsel->name, evsel->filter);
4139 				return -1;
4140 			}
4141 
4142 			pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
4143 				 arg, (int)(right - tok), tok, right_size, right);
4144 
4145 			if (fmt->strtoul) {
4146 				u64 val;
4147 				struct syscall_arg syscall_arg = {
4148 					.trace = trace,
4149 					.fmt   = fmt,
4150 					.type_name = type,
4151 					.parm = fmt->parm,
4152 				};
4153 
4154 				if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
4155 					char *n, expansion[19];
4156 					int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
4157 					int expansion_offset = right - new_filter;
4158 
4159 					pr_debug("%s", expansion);
4160 
4161 					if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
4162 						pr_debug(" out of memory!\n");
4163 						free(new_filter);
4164 						return -1;
4165 					}
4166 					if (new_filter != evsel->filter)
4167 						free(new_filter);
4168 					left = n + expansion_offset + expansion_length;
4169 					new_filter = n;
4170 				} else {
4171 					pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
4172 					       right_size, right, arg, evsel->name, evsel->filter);
4173 					return -1;
4174 				}
4175 			} else {
4176 				pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
4177 				       arg, evsel->name, evsel->filter);
4178 				return -1;
4179 			}
4180 
4181 			pr_debug("\n");
4182 		} else {
4183 			left = right_end;
4184 		}
4185 	}
4186 
4187 	if (new_filter != evsel->filter) {
4188 		pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
4189 		evsel__set_filter(evsel, new_filter);
4190 		free(new_filter);
4191 	}
4192 
4193 	return 0;
4194 }
4195 
4196 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
4197 {
4198 	struct evlist *evlist = trace->evlist;
4199 	struct evsel *evsel;
4200 
4201 	evlist__for_each_entry(evlist, evsel) {
4202 		if (evsel->filter == NULL)
4203 			continue;
4204 
4205 		if (trace__expand_filter(trace, evsel)) {
4206 			*err_evsel = evsel;
4207 			return -1;
4208 		}
4209 	}
4210 
4211 	return 0;
4212 }
4213 
4214 static int trace__run(struct trace *trace, int argc, const char **argv)
4215 {
4216 	struct evlist *evlist = trace->evlist;
4217 	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
4218 	int err = -1, i;
4219 	unsigned long before;
4220 	const bool forks = argc > 0;
4221 	bool draining = false;
4222 
4223 	trace->live = true;
4224 
4225 	if (!trace->raw_augmented_syscalls) {
4226 		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
4227 			goto out_error_raw_syscalls;
4228 
4229 		if (trace->trace_syscalls)
4230 			trace->vfs_getname = evlist__add_vfs_getname(evlist);
4231 	}
4232 
4233 	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
4234 		pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
4235 		if (pgfault_maj == NULL)
4236 			goto out_error_mem;
4237 		evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
4238 		evlist__add(evlist, pgfault_maj);
4239 	}
4240 
4241 	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
4242 		pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
4243 		if (pgfault_min == NULL)
4244 			goto out_error_mem;
4245 		evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
4246 		evlist__add(evlist, pgfault_min);
4247 	}
4248 
4249 	/* Enable ignoring missing threads when -u/-p option is defined. */
4250 	trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
4251 
4252 	if (trace->sched &&
4253 	    evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
4254 		goto out_error_sched_stat_runtime;
4255 	/*
4256 	 * If a global cgroup was set, apply it to all the events without an
4257 	 * explicit cgroup. I.e.:
4258 	 *
4259 	 * 	trace -G A -e sched:*switch
4260 	 *
4261 	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
4262 	 * _and_ sched:sched_switch to the 'A' cgroup, while:
4263 	 *
4264 	 * trace -e sched:*switch -G A
4265 	 *
4266 	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
4267 	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
4268 	 * a cgroup (on the root cgroup, sys wide, etc).
4269 	 *
4270 	 * Multiple cgroups:
4271 	 *
4272 	 * trace -G A -e sched:*switch -G B
4273 	 *
4274 	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
4275 	 * to the 'B' cgroup.
4276 	 *
4277 	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
4278 	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
4279 	 */
4280 	if (trace->cgroup)
4281 		evlist__set_default_cgroup(trace->evlist, trace->cgroup);
4282 
4283 	err = evlist__create_maps(evlist, &trace->opts.target);
4284 	if (err < 0) {
4285 		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
4286 		goto out_delete_evlist;
4287 	}
4288 
4289 	err = trace__symbols_init(trace, evlist);
4290 	if (err < 0) {
4291 		fprintf(trace->output, "Problems initializing symbol libraries!\n");
4292 		goto out_delete_evlist;
4293 	}
4294 
4295 	evlist__config(evlist, &trace->opts, &callchain_param);
4296 
4297 	if (forks) {
4298 		err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
4299 		if (err < 0) {
4300 			fprintf(trace->output, "Couldn't run the workload!\n");
4301 			goto out_delete_evlist;
4302 		}
4303 		workload_pid = evlist->workload.pid;
4304 	}
4305 
4306 	err = evlist__open(evlist);
4307 	if (err < 0)
4308 		goto out_error_open;
4309 #ifdef HAVE_BPF_SKEL
4310 	if (trace->syscalls.events.bpf_output) {
4311 		struct perf_cpu cpu;
4312 
4313 		/*
4314 		 * Set up the __augmented_syscalls__ BPF map to hold for each
4315 		 * CPU the bpf-output event's file descriptor.
4316 		 */
4317 		perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
4318 			bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
4319 					&cpu.cpu, sizeof(int),
4320 					xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
4321 						       cpu.cpu, 0),
4322 					sizeof(__u32), BPF_ANY);
4323 		}
4324 	}
4325 
4326 	if (trace->skel)
4327 		trace->filter_pids.map = trace->skel->maps.pids_filtered;
4328 #endif
4329 	err = trace__set_filter_pids(trace);
4330 	if (err < 0)
4331 		goto out_error_mem;
4332 
4333 #ifdef HAVE_BPF_SKEL
4334 	if (trace->skel && trace->skel->progs.sys_enter)
4335 		trace__init_syscalls_bpf_prog_array_maps(trace);
4336 #endif
4337 
4338 	if (trace->ev_qualifier_ids.nr > 0) {
4339 		err = trace__set_ev_qualifier_filter(trace);
4340 		if (err < 0)
4341 			goto out_errno;
4342 
4343 		if (trace->syscalls.events.sys_exit) {
4344 			pr_debug("event qualifier tracepoint filter: %s\n",
4345 				 trace->syscalls.events.sys_exit->filter);
4346 		}
4347 	}
4348 
4349 	/*
4350 	 * If the "close" syscall is not traced, then we will not have the
4351 	 * opportunity, in syscall_arg__scnprintf_close_fd(), to invalidate the
4352 	 * fd->pathname table and we would end up showing the last value set by
4353 	 * syscalls opening a pathname and associating it with a descriptor or
4354 	 * reading it from /proc/pid/fd/ in cases where that doesn't make
4355 	 * sense.
4356 	 *
4357 	 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
4358 	 * not in use.
4359 	 */
4360 	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
4361 
4362 	err = trace__expand_filters(trace, &evsel);
4363 	if (err)
4364 		goto out_delete_evlist;
4365 	err = evlist__apply_filters(evlist, &evsel, &trace->opts.target);
4366 	if (err < 0)
4367 		goto out_error_apply_filters;
4368 
4369 	err = evlist__mmap(evlist, trace->opts.mmap_pages);
4370 	if (err < 0)
4371 		goto out_error_mmap;
4372 
4373 	if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
4374 		evlist__enable(evlist);
4375 
4376 	if (forks)
4377 		evlist__start_workload(evlist);
4378 
4379 	if (trace->opts.target.initial_delay) {
4380 		usleep(trace->opts.target.initial_delay * 1000);
4381 		evlist__enable(evlist);
4382 	}
4383 
4384 	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4385 		perf_thread_map__nr(evlist->core.threads) > 1 ||
4386 		evlist__first(evlist)->core.attr.inherit;
4387 
4388 	/*
4389 	 * Now that we already used evsel->core.attr to ask the kernel to set up the
4390 	 * events, let's reuse evsel->core.attr.sample_max_stack as the limit in
4391 	 * trace__resolve_callchain(), allowing per-event max-stack settings
4392 	 * to override an explicitly set --max-stack global setting.
4393 	 */
4394 	evlist__for_each_entry(evlist, evsel) {
4395 		if (evsel__has_callchain(evsel) &&
4396 		    evsel->core.attr.sample_max_stack == 0)
4397 			evsel->core.attr.sample_max_stack = trace->max_stack;
4398 	}
4399 again:
4400 	before = trace->nr_events;
4401 
4402 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
4403 		union perf_event *event;
4404 		struct mmap *md;
4405 
4406 		md = &evlist->mmap[i];
4407 		if (perf_mmap__read_init(&md->core) < 0)
4408 			continue;
4409 
4410 		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
4411 			++trace->nr_events;
4412 
4413 			err = trace__deliver_event(trace, event);
4414 			if (err)
4415 				goto out_disable;
4416 
4417 			perf_mmap__consume(&md->core);
4418 
4419 			if (interrupted)
4420 				goto out_disable;
4421 
4422 			if (done && !draining) {
4423 				evlist__disable(evlist);
4424 				draining = true;
4425 			}
4426 		}
4427 		perf_mmap__read_done(&md->core);
4428 	}
4429 
4430 	if (trace->nr_events == before) {
4431 		int timeout = done ? 100 : -1;
4432 
4433 		if (!draining && evlist__poll(evlist, timeout) > 0) {
4434 			if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
4435 				draining = true;
4436 
4437 			goto again;
4438 		} else {
4439 			if (trace__flush_events(trace))
4440 				goto out_disable;
4441 		}
4442 	} else {
4443 		goto again;
4444 	}
4445 
4446 out_disable:
4447 	thread__zput(trace->current);
4448 
4449 	evlist__disable(evlist);
4450 
4451 	if (trace->sort_events)
4452 		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4453 
4454 	if (!err) {
4455 		if (trace->summary)
4456 			trace__fprintf_thread_summary(trace, trace->output);
4457 
4458 		if (trace->show_tool_stats) {
4459 			fprintf(trace->output, "Stats:\n "
4460 					       " vfs_getname : %" PRIu64 "\n"
4461 					       " proc_getname: %" PRIu64 "\n",
4462 				trace->stats.vfs_getname,
4463 				trace->stats.proc_getname);
4464 		}
4465 	}
4466 
4467 out_delete_evlist:
4468 	trace__symbols__exit(trace);
4469 	evlist__free_syscall_tp_fields(evlist);
4470 	evlist__delete(evlist);
4471 	cgroup__put(trace->cgroup);
4472 	trace->evlist = NULL;
4473 	trace->live = false;
4474 	return err;
4475 {
4476 	char errbuf[BUFSIZ];
4477 
4478 out_error_sched_stat_runtime:
4479 	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
4480 	goto out_error;
4481 
4482 out_error_raw_syscalls:
4483 	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
4484 	goto out_error;
4485 
4486 out_error_mmap:
4487 	evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
4488 	goto out_error;
4489 
4490 out_error_open:
4491 	evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
4492 
4493 out_error:
4494 	fprintf(trace->output, "%s\n", errbuf);
4495 	goto out_delete_evlist;
4496 
4497 out_error_apply_filters:
4498 	fprintf(trace->output,
4499 		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
4500 		evsel->filter, evsel__name(evsel), errno,
4501 		str_error_r(errno, errbuf, sizeof(errbuf)));
4502 	goto out_delete_evlist;
4503 }
4504 out_error_mem:
4505 	fprintf(trace->output, "Not enough memory to run!\n");
4506 	goto out_delete_evlist;
4507 
4508 out_errno:
4509 	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4510 	goto out_delete_evlist;
4511 }
4512 
4513 static int trace__replay(struct trace *trace)
4514 {
4515 	const struct evsel_str_handler handlers[] = {
4516 		{ "probe:vfs_getname",	     trace__vfs_getname, },
4517 	};
4518 	struct perf_data data = {
4519 		.path  = input_name,
4520 		.mode  = PERF_DATA_MODE_READ,
4521 		.force = trace->force,
4522 	};
4523 	struct perf_session *session;
4524 	struct evsel *evsel;
4525 	int err = -1;
4526 
4527 	trace->tool.sample	  = trace__process_sample;
4528 	trace->tool.mmap	  = perf_event__process_mmap;
4529 	trace->tool.mmap2	  = perf_event__process_mmap2;
4530 	trace->tool.comm	  = perf_event__process_comm;
4531 	trace->tool.exit	  = perf_event__process_exit;
4532 	trace->tool.fork	  = perf_event__process_fork;
4533 	trace->tool.attr	  = perf_event__process_attr;
4534 	trace->tool.tracing_data  = perf_event__process_tracing_data;
4535 	trace->tool.build_id	  = perf_event__process_build_id;
4536 	trace->tool.namespaces	  = perf_event__process_namespaces;
4537 
4538 	trace->tool.ordered_events = true;
4539 	trace->tool.ordering_requires_timestamps = true;
4540 
4541 	/* add tid to output */
4542 	trace->multiple_threads = true;
4543 
4544 	session = perf_session__new(&data, &trace->tool);
4545 	if (IS_ERR(session))
4546 		return PTR_ERR(session);
4547 
4548 	if (trace->opts.target.pid)
4549 		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4550 
4551 	if (trace->opts.target.tid)
4552 		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4553 
4554 	if (symbol__init(&session->header.env) < 0)
4555 		goto out;
4556 
4557 	trace->host = &session->machines.host;
4558 
4559 	err = perf_session__set_tracepoints_handlers(session, handlers);
4560 	if (err)
4561 		goto out;
4562 
4563 	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
4564 	trace->syscalls.events.sys_enter = evsel;
4565 	/* older kernels have syscalls tp versus raw_syscalls */
4566 	if (evsel == NULL)
4567 		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
4568 
4569 	if (evsel &&
4570 	    (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
4571 	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
4572 		pr_err("Error initializing raw_syscalls:sys_enter event\n");
4573 		goto out;
4574 	}
4575 
4576 	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
4577 	trace->syscalls.events.sys_exit = evsel;
4578 	if (evsel == NULL)
4579 		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
4580 	if (evsel &&
4581 	    (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
4582 	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
4583 		pr_err("Error initializing raw_syscalls:sys_exit event\n");
4584 		goto out;
4585 	}
4586 
4587 	evlist__for_each_entry(session->evlist, evsel) {
4588 		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
4589 		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
4590 		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
4591 		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
4592 			evsel->handler = trace__pgfault;
4593 	}
4594 
4595 	setup_pager();
4596 
4597 	err = perf_session__process_events(session);
4598 	if (err)
4599 		pr_err("Failed to process events, error %d\n", err);
4600 
4601 	else if (trace->summary)
4602 		trace__fprintf_thread_summary(trace, trace->output);
4603 
4604 out:
4605 	perf_session__delete(session);
4606 
4607 	return err;
4608 }
4609 
4610 static size_t trace__fprintf_threads_header(FILE *fp)
4611 {
4612 	size_t printed;
4613 
4614 	printed  = fprintf(fp, "\n Summary of events:\n\n");
4615 
4616 	return printed;
4617 }
4618 
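/* Resort the per-thread syscall stats rbtree by total time spent (msecs). */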
4619 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
4620 	struct syscall_stats *stats;
4621 	double		     msecs;
4622 	int		     syscall;
4623 )
4624 {
4625 	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
4626 	struct syscall_stats *stats = source->priv;
4627 
4628 	entry->syscall = source->i;
4629 	entry->stats   = stats;
4630 	entry->msecs   = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
4631 }
4632 
4633 static size_t thread__dump_stats(struct thread_trace *ttrace,
4634 				 struct trace *trace, FILE *fp)
4635 {
4636 	size_t printed = 0;
4637 	struct syscall *sc;
4638 	struct rb_node *nd;
4639 	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
4640 
4641 	if (syscall_stats == NULL)
4642 		return 0;
4643 
4644 	printed += fprintf(fp, "\n");
4645 
4646 	printed += fprintf(fp, "   syscall            calls  errors  total       min       avg       max       stddev\n");
4647 	printed += fprintf(fp, "                                     (msec)    (msec)    (msec)    (msec)        (%%)\n");
4648 	printed += fprintf(fp, "   --------------- --------  ------ -------- --------- --------- ---------     ------\n");
4649 
4650 	resort_rb__for_each_entry(nd, syscall_stats) {
4651 		struct syscall_stats *stats = syscall_stats_entry->stats;
4652 		if (stats) {
4653 			double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
4654 			double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
4655 			double avg = avg_stats(&stats->stats);
4656 			double pct;
4657 			u64 n = (u64)stats->stats.n;
4658 
4659 			pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
4660 			avg /= NSEC_PER_MSEC;
4661 
4662 			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4663 			printed += fprintf(fp, "   %-15s", sc->name);
4664 			printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
4665 					   n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
4666 			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
4667 
4668 			if (trace->errno_summary && stats->nr_failures) {
4669 				int e;
4670 
4671 				for (e = 0; e < stats->max_errno; ++e) {
4672 					if (stats->errnos[e] != 0)
4673 						fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
4674 				}
4675 			}
4676 		}
4677 	}
4678 
4679 	resort_rb__delete(syscall_stats);
4680 	printed += fprintf(fp, "\n\n");
4681 
4682 	return printed;
4683 }
4684 
4685 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4686 {
4687 	size_t printed = 0;
4688 	struct thread_trace *ttrace = thread__priv(thread);
4689 	double ratio;
4690 
4691 	if (ttrace == NULL)
4692 		return 0;
4693 
4694 	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4695 
4696 	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread));
4697 	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
4698 	printed += fprintf(fp, "%.1f%%", ratio);
4699 	if (ttrace->pfmaj)
4700 		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
4701 	if (ttrace->pfmin)
4702 		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
4703 	if (trace->sched)
4704 		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
4705 	else if (fputc('\n', fp) != EOF)
4706 		++printed;
4707 
4708 	printed += thread__dump_stats(ttrace, trace, fp);
4709 
4710 	return printed;
4711 }
4712 
4713 static unsigned long thread__nr_events(struct thread_trace *ttrace)
4714 {
4715 	return ttrace ? ttrace->nr_events : 0;
4716 }
4717 
4718 static int trace_nr_events_cmp(void *priv __maybe_unused,
4719 			       const struct list_head *la,
4720 			       const struct list_head *lb)
4721 {
4722 	struct thread_list *a = list_entry(la, struct thread_list, list);
4723 	struct thread_list *b = list_entry(lb, struct thread_list, list);
4724 	unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread));
4725 	unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread));
4726 
4727 	if (a_nr_events != b_nr_events)
4728 		return a_nr_events < b_nr_events ? -1 : 1;
4729 
4730 	/* Identical number of events, place the thread with the smaller tid first. */
4731 	return thread__tid(a->thread) < thread__tid(b->thread)
4732 		? -1
4733 		: (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0);
4734 }
4735 
4736 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4737 {
4738 	size_t printed = trace__fprintf_threads_header(fp);
4739 	LIST_HEAD(threads);
4740 
4741 	if (machine__thread_list(trace->host, &threads) == 0) {
4742 		struct thread_list *pos;
4743 
4744 		list_sort(NULL, &threads, trace_nr_events_cmp);
4745 
4746 		list_for_each_entry(pos, &threads, list)
4747 			printed += trace__fprintf_thread(fp, pos->thread, trace);
4748 	}
4749 	thread_list__delete(&threads);
4750 	return printed;
4751 }
4752 
4753 static int trace__set_duration(const struct option *opt, const char *str,
4754 			       int unset __maybe_unused)
4755 {
4756 	struct trace *trace = opt->value;
4757 
4758 	trace->duration_filter = atof(str);
4759 	return 0;
4760 }
4761 
4762 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
4763 					      int unset __maybe_unused)
4764 {
4765 	int ret = -1;
4766 	size_t i;
4767 	struct trace *trace = opt->value;
4768 	/*
4769 	 * FIXME: introduce an intarray class, plainly parse the csv and create a
4770 	 * { int nr, int entries[] } struct...
4771 	 */
4772 	struct intlist *list = intlist__new(str);
4773 
4774 	if (list == NULL)
4775 		return -1;
4776 
4777 	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4778 	trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4779 
4780 	if (trace->filter_pids.entries == NULL)
4781 		goto out;
4782 
4783 	trace->filter_pids.entries[0] = getpid();
4784 
4785 	for (i = 1; i < trace->filter_pids.nr; ++i)
4786 		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4787 
4788 	intlist__delete(list);
4789 	ret = 0;
4790 out:
4791 	return ret;
4792 }
4793 
4794 static int trace__open_output(struct trace *trace, const char *filename)
4795 {
4796 	struct stat st;
4797 
4798 	if (!stat(filename, &st) && st.st_size) {
4799 		char oldname[PATH_MAX];
4800 
4801 		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
4802 		unlink(oldname);
4803 		rename(filename, oldname);
4804 	}
4805 
4806 	trace->output = fopen(filename, "w");
4807 
4808 	return trace->output == NULL ? -errno : 0;
4809 }
4810 
4811 static int parse_pagefaults(const struct option *opt, const char *str,
4812 			    int unset __maybe_unused)
4813 {
4814 	int *trace_pgfaults = opt->value;
4815 
4816 	if (strcmp(str, "all") == 0)
4817 		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
4818 	else if (strcmp(str, "maj") == 0)
4819 		*trace_pgfaults |= TRACE_PFMAJ;
4820 	else if (strcmp(str, "min") == 0)
4821 		*trace_pgfaults |= TRACE_PFMIN;
4822 	else
4823 		return -1;
4824 
4825 	return 0;
4826 }
4827 
4828 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
4829 {
4830 	struct evsel *evsel;
4831 
4832 	evlist__for_each_entry(evlist, evsel) {
4833 		if (evsel->handler == NULL)
4834 			evsel->handler = handler;
4835 	}
4836 }
4837 
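/*
 * Copy the hand written syscall_fmt arg formatters over this evsel's arg fmt
 * array, skipping the leading syscall number field (__syscall_nr or nr).
 */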
4838 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
4839 {
4840 	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
4841 
4842 	if (fmt) {
4843 		const struct syscall_fmt *scfmt = syscall_fmt__find(name);
4844 
4845 		if (scfmt) {
4846 			const struct tep_event *tp_format = evsel__tp_format(evsel);
4847 
4848 			if (tp_format) {
4849 				int skip = 0;
4850 
4851 				if (strcmp(tp_format->format.fields->name, "__syscall_nr") == 0 ||
4852 				    strcmp(tp_format->format.fields->name, "nr") == 0)
4853 					++skip;
4854 
4855 				memcpy(fmt + skip, scfmt->arg,
4856 				       (tp_format->format.nr_fields - skip) * sizeof(*fmt));
4857 			}
4858 		}
4859 	}
4860 }
4861 
4862 static int evlist__set_syscall_tp_fields(struct evlist *evlist, bool *use_btf)
4863 {
4864 	struct evsel *evsel;
4865 
4866 	evlist__for_each_entry(evlist, evsel) {
4867 		const struct tep_event *tp_format;
4868 
4869 		if (evsel->priv)
4870 			continue;
4871 
4872 		tp_format = evsel__tp_format(evsel);
4873 		if (!tp_format)
4874 			continue;
4875 
4876 		if (strcmp(tp_format->system, "syscalls")) {
4877 			evsel__init_tp_arg_scnprintf(evsel, use_btf);
4878 			continue;
4879 		}
4880 
4881 		if (evsel__init_syscall_tp(evsel))
4882 			return -1;
4883 
4884 		if (!strncmp(tp_format->name, "sys_enter_", 10)) {
4885 			struct syscall_tp *sc = __evsel__syscall_tp(evsel);
4886 
4887 			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
4888 				return -1;
4889 
4890 			evsel__set_syscall_arg_fmt(evsel,
4891 						   tp_format->name + sizeof("sys_enter_") - 1);
4892 		} else if (!strncmp(tp_format->name, "sys_exit_", 9)) {
4893 			struct syscall_tp *sc = __evsel__syscall_tp(evsel);
4894 
4895 			if (__tp_field__init_uint(&sc->ret, sizeof(u64),
4896 						  sc->id.offset + sizeof(u64),
4897 						  evsel->needs_swap))
4898 				return -1;
4899 
4900 			evsel__set_syscall_arg_fmt(evsel,
4901 						   tp_format->name + sizeof("sys_exit_") - 1);
4902 		}
4903 	}
4904 
4905 	return 0;
4906 }
4907 
4908 /*
4909  * XXX: Hackish, just splitting the combined -e/--event list into syscalls
4910  * (raw_syscalls:sys_{enter,exit}) and events (tracepoints, HW, SW, etc) to use
4911  * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4912  *
4913  * It'd be better to introduce a parse_options() variant that would return a
4914  * list with the terms it didn't match to an event...
4915  */
4916 static int trace__parse_events_option(const struct option *opt, const char *str,
4917 				      int unset __maybe_unused)
4918 {
4919 	struct trace *trace = (struct trace *)opt->value;
4920 	const char *s = str;
4921 	char *sep = NULL, *lists[2] = { NULL, NULL, };
4922 	int len = strlen(str) + 1, err = -1, list, idx;
4923 	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
4924 	char group_name[PATH_MAX];
4925 	const struct syscall_fmt *fmt;
4926 
4927 	if (strace_groups_dir == NULL)
4928 		return -1;
4929 
4930 	if (*s == '!') {
4931 		++s;
4932 		trace->not_ev_qualifier = true;
4933 	}
4934 
4935 	while (1) {
4936 		if ((sep = strchr(s, ',')) != NULL)
4937 			*sep = '\0';
4938 
4939 		list = 0;
4940 		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4941 		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4942 			list = 1;
4943 			goto do_concat;
4944 		}
4945 
4946 		fmt = syscall_fmt__find_by_alias(s);
4947 		if (fmt != NULL) {
4948 			list = 1;
4949 			s = fmt->name;
4950 		} else {
4951 			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
4952 			if (access(group_name, R_OK) == 0)
4953 				list = 1;
4954 		}
4955 do_concat:
4956 		if (lists[list]) {
4957 			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
4958 		} else {
4959 			lists[list] = malloc(len);
4960 			if (lists[list] == NULL)
4961 				goto out;
4962 			strcpy(lists[list], s);
4963 		}
4964 
4965 		if (!sep)
4966 			break;
4967 
4968 		*sep = ',';
4969 		s = sep + 1;
4970 	}
4971 
4972 	if (lists[1] != NULL) {
4973 		struct strlist_config slist_config = {
4974 			.dirname = strace_groups_dir,
4975 		};
4976 
4977 		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4978 		if (trace->ev_qualifier == NULL) {
4979 			fputs("Not enough memory to parse event qualifier\n", trace->output);
4980 			goto out;
4981 		}
4982 
4983 		if (trace__validate_ev_qualifier(trace))
4984 			goto out;
4985 		trace->trace_syscalls = true;
4986 	}
4987 
4988 	err = 0;
4989 
4990 	if (lists[0]) {
4991 		struct parse_events_option_args parse_events_option_args = {
4992 			.evlistp = &trace->evlist,
4993 		};
4994 		struct option o = {
4995 			.value = &parse_events_option_args,
4996 		};
4997 		err = parse_events_option(&o, lists[0], 0);
4998 	}
4999 out:
5000 	free(strace_groups_dir);
5001 	free(lists[0]);
5002 	free(lists[1]);
5003 	if (sep)
5004 		*sep = ',';
5005 
5006 	return err;
5007 }
5008 
5009 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
5010 {
5011 	struct trace *trace = opt->value;
5012 
5013 	if (!list_empty(&trace->evlist->core.entries)) {
5014 		struct option o = {
5015 			.value = &trace->evlist,
5016 		};
5017 		return parse_cgroups(&o, str, unset);
5018 	}
5019 	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
5020 
5021 	return 0;
5022 }
5023 
5024 static int trace__config(const char *var, const char *value, void *arg)
5025 {
5026 	struct trace *trace = arg;
5027 	int err = 0;
5028 
5029 	if (!strcmp(var, "trace.add_events")) {
5030 		trace->perfconfig_events = strdup(value);
5031 		if (trace->perfconfig_events == NULL) {
5032 			pr_err("Not enough memory for %s\n", "trace.add_events");
5033 			return -1;
5034 		}
5035 	} else if (!strcmp(var, "trace.show_timestamp")) {
5036 		trace->show_tstamp = perf_config_bool(var, value);
5037 	} else if (!strcmp(var, "trace.show_duration")) {
5038 		trace->show_duration = perf_config_bool(var, value);
5039 	} else if (!strcmp(var, "trace.show_arg_names")) {
5040 		trace->show_arg_names = perf_config_bool(var, value);
5041 		if (!trace->show_arg_names)
5042 			trace->show_zeros = true;
5043 	} else if (!strcmp(var, "trace.show_zeros")) {
5044 		bool new_show_zeros = perf_config_bool(var, value);
5045 		if (!trace->show_arg_names && !new_show_zeros) {
5046 			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
5047 			goto out;
5048 		}
5049 		trace->show_zeros = new_show_zeros;
5050 	} else if (!strcmp(var, "trace.show_prefix")) {
5051 		trace->show_string_prefix = perf_config_bool(var, value);
5052 	} else if (!strcmp(var, "trace.no_inherit")) {
5053 		trace->opts.no_inherit = perf_config_bool(var, value);
5054 	} else if (!strcmp(var, "trace.args_alignment")) {
5055 		int args_alignment = 0;
5056 		if (perf_config_int(&args_alignment, var, value) == 0)
5057 			trace->args_alignment = args_alignment;
5058 	} else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
5059 		if (strcasecmp(value, "libtraceevent") == 0)
5060 			trace->libtraceevent_print = true;
5061 		else if (strcasecmp(value, "libbeauty") == 0)
5062 			trace->libtraceevent_print = false;
5063 	}
5064 out:
5065 	return err;
5066 }
5067 
5068 static void trace__exit(struct trace *trace)
5069 {
5070 	int i;
5071 
5072 	strlist__delete(trace->ev_qualifier);
5073 	zfree(&trace->ev_qualifier_ids.entries);
5074 	if (trace->syscalls.table) {
5075 		for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
5076 			syscall__exit(&trace->syscalls.table[i]);
5077 		zfree(&trace->syscalls.table);
5078 	}
5079 	syscalltbl__delete(trace->sctbl);
5080 	zfree(&trace->perfconfig_events);
5081 }
5082 
5083 #ifdef HAVE_BPF_SKEL
5084 static int bpf__setup_bpf_output(struct evlist *evlist)
5085 {
5086 	int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
5087 
5088 	if (err)
5089 		pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
5090 
5091 	return err;
5092 }
5093 #endif
5094 
5095 int cmd_trace(int argc, const char **argv)
5096 {
5097 	const char *trace_usage[] = {
5098 		"perf trace [<options>] [<command>]",
5099 		"perf trace [<options>] -- <command> [<options>]",
5100 		"perf trace record [<options>] [<command>]",
5101 		"perf trace record [<options>] -- <command> [<options>]",
5102 		NULL
5103 	};
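	/*
	 * A few illustrative invocations matching the usage strings above
	 * (workload, pid and event names are just examples):
	 *
	 *   perf trace -e open*,close -- ls
	 *   perf trace -p 1234 --summary
	 *   perf trace --event sched:sched_switch -a
	 */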
5104 	struct trace trace = {
5105 		.opts = {
5106 			.target = {
5107 				.uid	   = UINT_MAX,
5108 				.uses_mmap = true,
5109 			},
5110 			.user_freq     = UINT_MAX,
5111 			.user_interval = ULLONG_MAX,
5112 			.no_buffering  = true,
5113 			.mmap_pages    = UINT_MAX,
5114 		},
5115 		.output = stderr,
5116 		.show_comm = true,
5117 		.show_tstamp = true,
5118 		.show_duration = true,
5119 		.show_arg_names = true,
5120 		.args_alignment = 70,
5121 		.trace_syscalls = false,
5122 		.kernel_syscallchains = false,
5123 		.max_stack = UINT_MAX,
5124 		.max_events = ULONG_MAX,
5125 	};
5126 	const char *output_name = NULL;
5127 	const struct option trace_options[] = {
5128 	OPT_CALLBACK('e', "event", &trace, "event",
5129 		     "event/syscall selector. use 'perf list' to list available events",
5130 		     trace__parse_events_option),
5131 	OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
5132 		     "event filter", parse_filter),
5133 	OPT_BOOLEAN(0, "comm", &trace.show_comm,
5134 		    "show the thread COMM next to its id"),
5135 	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
5136 	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
5137 		     trace__parse_events_option),
5138 	OPT_STRING('o', "output", &output_name, "file", "output file name"),
5139 	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
5140 	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
5141 		    "trace events on existing process id"),
5142 	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
5143 		    "trace events on existing thread id"),
5144 	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
5145 		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
5146 	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
5147 		    "system-wide collection from all CPUs"),
5148 	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
5149 		    "list of cpus to monitor"),
5150 	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
5151 		    "child tasks do not inherit counters"),
5152 	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
5153 		     "number of mmap data pages", evlist__parse_mmap_pages),
5154 	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
5155 		   "user to profile"),
5156 	OPT_CALLBACK(0, "duration", &trace, "float",
5157 		     "show only events with duration > N.M ms",
5158 		     trace__set_duration),
5159 	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
5160 	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
5161 	OPT_BOOLEAN('T', "time", &trace.full_time,
5162 		    "Show full timestamp, not time relative to first start"),
5163 	OPT_BOOLEAN(0, "failure", &trace.failure_only,
5164 		    "Show only syscalls that failed"),
5165 	OPT_BOOLEAN('s', "summary", &trace.summary_only,
5166 		    "Show only syscall summary with statistics"),
5167 	OPT_BOOLEAN('S', "with-summary", &trace.summary,
5168 		    "Show all syscalls and summary with statistics"),
5169 	OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
5170 		    "Show errno stats per syscall, use with -s or -S"),
5171 	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
5172 		     "Trace pagefaults", parse_pagefaults, "maj"),
5173 	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
5174 	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
5175 	OPT_CALLBACK(0, "call-graph", &trace.opts,
5176 		     "record_mode[,record_size]", record_callchain_help,
5177 		     &record_parse_callchain_opt),
5178 	OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
5179 		    "Use libtraceevent to print the tracepoint arguments."),
5180 	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
5181 		    "Show the kernel callchains on the syscall exit path"),
5182 	OPT_ULONG(0, "max-events", &trace.max_events,
5183 		"Set the maximum number of events to print, exit after that is reached. "),
5184 	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
5185 		     "Set the minimum stack depth when parsing the callchain, "
5186 		     "anything below the specified depth will be ignored."),
5187 	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
5188 		     "Set the maximum stack depth when parsing the callchain, "
5189 		     "anything beyond the specified depth will be ignored. "
5190 		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
5191 	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
5192 			"Sort batch of events before processing, use if getting out of order events"),
5193 	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
5194 			"print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
5195 	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
5196 			"per thread proc mmap processing timeout in ms"),
5197 	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
5198 		     trace__parse_cgroups),
5199 	OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
5200 		     "ms to wait before starting measurement after program "
5201 		     "start"),
5202 	OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer"
5203 		       "to customized ones"),
5204 	OPTS_EVSWITCH(&trace.evswitch),
5205 	OPT_END()
5206 	};
5207 	bool __maybe_unused max_stack_user_set = true;
5208 	bool mmap_pages_user_set = true;
5209 	struct evsel *evsel;
5210 	const char * const trace_subcommands[] = { "record", NULL };
5211 	int err = -1;
5212 	char bf[BUFSIZ];
5213 	struct sigaction sigchld_act;
5214 
5215 	signal(SIGSEGV, sighandler_dump_stack);
5216 	signal(SIGFPE, sighandler_dump_stack);
5217 	signal(SIGINT, sighandler_interrupt);
5218 
5219 	memset(&sigchld_act, 0, sizeof(sigchld_act));
5220 	sigchld_act.sa_flags = SA_SIGINFO;
5221 	sigchld_act.sa_sigaction = sighandler_chld;
5222 	sigaction(SIGCHLD, &sigchld_act, NULL);
5223 
5224 	trace.evlist = evlist__new();
5225 	trace.sctbl = syscalltbl__new();
5226 
5227 	if (trace.evlist == NULL || trace.sctbl == NULL) {
5228 		pr_err("Not enough memory to run!\n");
5229 		err = -ENOMEM;
5230 		goto out;
5231 	}
5232 
5233 	/*
5234 	 * Parsing .perfconfig may entail creating a BPF event, which may need
5235 	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
5236 	 * is too small. This affects just this process, not touching the
5237 	 * global setting. If it fails we'll get something in 'perf trace -v'
5238 	 * to help diagnose the problem.
5239 	 */
5240 	rlimit__bump_memlock();
5241 
5242 	err = perf_config(trace__config, &trace);
5243 	if (err)
5244 		goto out;
5245 
5246 	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
5247 				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
5248 
5249 	/*
5250 	 * By now we have already passed through trace__parse_events_option(),
5251 	 * which figured out whether -e syscall_name was used; if it wasn't, but
5252 	 * --event foo:bar was, the user is interested _just_ in those, say,
5253 	 * tracepoint events, not in the strace-like syscall-name-based mode.
5254 	 *
5255 	 * This is important because we need to check if strace-like mode is
5256 	 * needed to decide whether the eBPF __augmented_syscalls__ code, if it
5257 	 * is in the mix, say, via .perfconfig trace.add_events, should be
5258 	 * filtered out.
5259 	 */
5260 	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
5261 	    trace.evlist->core.nr_entries == 0 /* Was --event used? */) {
5262 		trace.trace_syscalls = true;
5263 	}
5264 	/*
5265 	 * Now that we have --verbose figured out, let's see if we need to parse
5266 	 * events from .perfconfig, so that if those events fail parsing, say some
5267 	 * BPF program fails, then we'll be able to use --verbose to see what went
5268 	 * wrong in more detail.
5269 	 */
5270 	if (trace.perfconfig_events != NULL) {
5271 		struct parse_events_error parse_err;
5272 
5273 		parse_events_error__init(&parse_err);
5274 		err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
5275 		if (err)
5276 			parse_events_error__print(&parse_err, trace.perfconfig_events);
5277 		parse_events_error__exit(&parse_err);
5278 		if (err)
5279 			goto out;
5280 	}
5281 
5282 	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
5283 		usage_with_options_msg(trace_usage, trace_options,
5284 				       "cgroup monitoring only available in system-wide mode");
5285 	}
5286 
5287 #ifdef HAVE_BPF_SKEL
5288 	if (!trace.trace_syscalls)
5289 		goto skip_augmentation;
5290 
5291 	if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) {
5292 		pr_debug("Syscall augmentation fails with record, disabling augmentation");
5293 		goto skip_augmentation;
5294 	}
5295 
5296 	trace.skel = augmented_raw_syscalls_bpf__open();
5297 	if (!trace.skel) {
5298 		pr_debug("Failed to open augmented syscalls BPF skeleton");
5299 	} else {
5300 		/*
5301 		 * Disable attaching the BPF programs except for sys_enter and
5302 		 * sys_exit that tail call into this as necessary.
5303 		 */
5304 		struct bpf_program *prog;
5305 
5306 		bpf_object__for_each_program(prog, trace.skel->obj) {
5307 			if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
5308 				bpf_program__set_autoattach(prog, /*autoattach=*/false);
5309 		}
5310 
5311 		err = augmented_raw_syscalls_bpf__load(trace.skel);
5312 
5313 		if (err < 0) {
5314 			libbpf_strerror(err, bf, sizeof(bf));
5315 			pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
5316 		} else {
5317 			augmented_raw_syscalls_bpf__attach(trace.skel);
5318 			trace__add_syscall_newtp(&trace);
5319 		}
5320 	}
5321 
5322 	err = bpf__setup_bpf_output(trace.evlist);
5323 	if (err) {
5324 		libbpf_strerror(err, bf, sizeof(bf));
5325 		pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
5326 		goto out;
5327 	}
5328 	trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
5329 	assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
5330 skip_augmentation:
5331 #endif
5332 	err = -1;
5333 
5334 	if (trace.trace_pgfaults) {
5335 		trace.opts.sample_address = true;
5336 		trace.opts.sample_time = true;
5337 	}
5338 
5339 	if (trace.opts.mmap_pages == UINT_MAX)
5340 		mmap_pages_user_set = false;
5341 
5342 	if (trace.max_stack == UINT_MAX) {
5343 		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
5344 		max_stack_user_set = false;
5345 	}
5346 
5347 #ifdef HAVE_DWARF_UNWIND_SUPPORT
5348 	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
5349 		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5350 	}
5351 #endif
5352 
5353 	if (callchain_param.enabled) {
5354 		if (!mmap_pages_user_set && geteuid() == 0)
5355 			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5356 
5357 		symbol_conf.use_callchain = true;
5358 	}
5359 
5360 	if (trace.evlist->core.nr_entries > 0) {
5361 		bool use_btf = false;
5362 
5363 		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5364 		if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) {
5365 			perror("failed to set syscalls:* tracepoint fields");
5366 			goto out;
5367 		}
5368 
5369 		if (use_btf)
5370 			trace__load_vmlinux_btf(&trace);
5371 	}
5372 
5373 	if (trace.sort_events) {
5374 		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5375 		ordered_events__set_copy_on_queue(&trace.oe.data, true);
5376 	}
5377 
5378 	/*
5379 	 * If we are augmenting syscalls, then combine what we put in the
5380 	 * __augmented_syscalls__ BPF map with what is in the
5381 	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
5382 	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
5383 	 *
5384 	 * We'll switch to look at two BPF maps, one for sys_enter and the
5385 	 * other for sys_exit when we start augmenting the sys_exit paths with
5386 	 * buffers that are being copied from kernel to userspace, think 'read'
5387 	 * syscall.
5388 	 */
5389 	if (trace.syscalls.events.bpf_output) {
5390 		evlist__for_each_entry(trace.evlist, evsel) {
5391 			bool raw_syscalls_sys_exit = evsel__name_is(evsel, "raw_syscalls:sys_exit");
5392 
5393 			if (raw_syscalls_sys_exit) {
5394 				trace.raw_augmented_syscalls = true;
5395 				goto init_augmented_syscall_tp;
5396 			}
5397 
5398 			if (trace.syscalls.events.bpf_output->priv == NULL &&
5399 			    strstr(evsel__name(evsel), "syscalls:sys_enter")) {
5400 				struct evsel *augmented = trace.syscalls.events.bpf_output;
5401 				if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
5402 				    evsel__init_augmented_syscall_tp_args(augmented))
5403 					goto out;
5404 				/*
5405 				 * 'augmented' is the __augmented_syscalls__ BPF_OUTPUT event.
5406 				 * Above we made sure we can get from its payload the tp fields
5407 				 * that we get from the syscalls:sys_enter tracefs format file.
5408 				 */
5409 				augmented->handler = trace__sys_enter;
5410 				/*
5411 				 * Now we do the same for the *syscalls:sys_enter event so that
5412 				 * if we handle it directly, i.e. if the BPF prog returns 0 so
5413 				 * as not to filter it, then we'll handle it just like we would
5414 				 * for the BPF_OUTPUT one:
5415 				 */
5416 				if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
5417 				    evsel__init_augmented_syscall_tp_args(evsel))
5418 					goto out;
5419 				evsel->handler = trace__sys_enter;
5420 			}
5421 
5422 			if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
5423 				struct syscall_tp *sc;
5424 init_augmented_syscall_tp:
5425 				if (evsel__init_augmented_syscall_tp(evsel, evsel))
5426 					goto out;
5427 				sc = __evsel__syscall_tp(evsel);
5428 				/*
5429 				 * For now with BPF raw_augmented we hook into
5430 				 * raw_syscalls:sys_enter and there we get all
5431 				 * 6 syscall args plus the tracepoint common
5432 				 * fields and the syscall_nr (another long).
5433 				 * So we check if that is the case and, if so, don't
5434 				 * use sc->args_size but instead the full
5435 				 * raw_syscalls:sys_enter payload size, which is
5436 				 * fixed.
5437 				 *
5438 				 * We'll revisit this later to pass
5439 				 * sc->args_size to the BPF augmenter (now
5440 				 * tools/perf/examples/bpf/augmented_raw_syscalls.c),
5441 				 * so that it copies only what we need for each
5442 				 * syscall, like what happens when we use
5443 				 * syscalls:sys_enter_NAME, so that we reduce
5444 				 * the kernel/userspace traffic to just what is
5445 				 * needed for each syscall.
5446 				 */
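				/*
				 * Spelling out that fixed payload size: sc->id.offset
				 * covers the common tracepoint fields that precede 'id',
				 * then 'id' itself and the 6 args are each a long, hence
				 * the (6 + 1) * sizeof(long) below.
				 */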
5447 				if (trace.raw_augmented_syscalls)
5448 					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5449 				evsel__init_augmented_syscall_tp_ret(evsel);
5450 				evsel->handler = trace__sys_exit;
5451 			}
5452 		}
5453 	}
5454 
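	/* "perf trace record ..." just delegates to trace__record(), i.e. to 'perf record' */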
5455 	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
5456 		return trace__record(&trace, argc-1, &argv[1]);
5457 
5458 	/* Using just --errno-summary will trigger --summary */
5459 	if (trace.errno_summary && !trace.summary && !trace.summary_only)
5460 		trace.summary_only = true;
5461 
5462 	/* summary_only implies summary option, but don't overwrite summary if set */
5463 	if (trace.summary_only)
5464 		trace.summary = trace.summary_only;
5465 
5466 	/* Keep exited threads, otherwise information might be lost for summary */
5467 	if (trace.summary)
5468 		symbol_conf.keep_exited_threads = true;
5469 
5470 	if (output_name != NULL) {
5471 		err = trace__open_output(&trace, output_name);
5472 		if (err < 0) {
5473 			perror("failed to create output file");
5474 			goto out;
5475 		}
5476 	}
5477 
5478 	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5479 	if (err)
5480 		goto out_close;
5481 
5482 	err = target__validate(&trace.opts.target);
5483 	if (err) {
5484 		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5485 		fprintf(trace.output, "%s", bf);
5486 		goto out_close;
5487 	}
5488 
5489 	err = target__parse_uid(&trace.opts.target);
5490 	if (err) {
5491 		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5492 		fprintf(trace.output, "%s", bf);
5493 		goto out_close;
5494 	}
5495 
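	/* Neither a workload nor a target (pid/tid/cpu/uid) was given: trace system wide */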
5496 	if (!argc && target__none(&trace.opts.target))
5497 		trace.opts.target.system_wide = true;
5498 
5499 	if (input_name)
5500 		err = trace__replay(&trace);
5501 	else
5502 		err = trace__run(&trace, argc, argv);
5503 
5504 out_close:
5505 	if (output_name != NULL)
5506 		fclose(trace.output);
5507 out:
5508 	trace__exit(&trace);
5509 #ifdef HAVE_BPF_SKEL
5510 	augmented_raw_syscalls_bpf__destroy(trace.skel);
5511 #endif
5512 	return err;
5513 }
5514