xref: /linux/tools/perf/builtin-trace.c (revision e0fcfb086fbbb6233de1062d4b2f05e9afedab3b)
1 /*
2  * builtin-trace.c
3  *
4  * Builtin 'trace' command:
5  *
6  * Display a continuously updated trace of any workload, CPU, specific PID,
7  * system wide, etc.  Default format is loosely strace like, but any other
8  * event may be specified using --event.
9  *
10  * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11  *
12  * Initially based on the 'trace' prototype by Thomas Gleixner:
13  *
14  * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15  */
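
/*
 * Illustrative invocations (an editor's sketch, not an exhaustive list; see
 * the perf-trace(1) man page for the authoritative option list):
 *
 *   perf trace ls                  # strace-like view of the syscalls of 'ls'
 *   perf trace -p 1234             # attach to an existing pid
 *   perf trace -e openat,close ls  # restrict to a subset of syscalls
 *   perf trace --event sched:sched_switch -a sleep 1  # extra tracepoints, system wide
 */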
16 
17 #include "util/record.h"
18 #include <traceevent/event-parse.h>
19 #include <api/fs/tracing_path.h>
20 #include <bpf/bpf.h>
21 #include "util/bpf_map.h"
22 #include "util/rlimit.h"
23 #include "builtin.h"
24 #include "util/cgroup.h"
25 #include "util/color.h"
26 #include "util/config.h"
27 #include "util/debug.h"
28 #include "util/dso.h"
29 #include "util/env.h"
30 #include "util/event.h"
31 #include "util/synthetic-events.h"
32 #include "util/evlist.h"
33 #include "util/evswitch.h"
34 #include "util/mmap.h"
35 #include <subcmd/pager.h>
36 #include <subcmd/exec-cmd.h>
37 #include "util/machine.h"
38 #include "util/map.h"
39 #include "util/symbol.h"
40 #include "util/path.h"
41 #include "util/session.h"
42 #include "util/thread.h"
43 #include <subcmd/parse-options.h>
44 #include "util/strlist.h"
45 #include "util/intlist.h"
46 #include "util/thread_map.h"
47 #include "util/stat.h"
48 #include "util/tool.h"
49 #include "util/util.h"
50 #include "trace/beauty/beauty.h"
51 #include "trace-event.h"
52 #include "util/parse-events.h"
53 #include "util/bpf-loader.h"
54 #include "callchain.h"
55 #include "print_binary.h"
56 #include "string2.h"
57 #include "syscalltbl.h"
58 #include "rb_resort.h"
59 #include "../perf.h"
60 
61 #include <errno.h>
62 #include <inttypes.h>
63 #include <poll.h>
64 #include <signal.h>
65 #include <stdlib.h>
66 #include <string.h>
67 #include <linux/err.h>
68 #include <linux/filter.h>
69 #include <linux/kernel.h>
70 #include <linux/random.h>
71 #include <linux/stringify.h>
72 #include <linux/time64.h>
73 #include <linux/zalloc.h>
74 #include <fcntl.h>
75 #include <sys/sysmacros.h>
76 
77 #include <linux/ctype.h>
78 
79 #ifndef O_CLOEXEC
80 # define O_CLOEXEC		02000000
81 #endif
82 
83 #ifndef F_LINUX_SPECIFIC_BASE
84 # define F_LINUX_SPECIFIC_BASE	1024
85 #endif
86 
87 struct trace {
88 	struct perf_tool	tool;
89 	struct syscalltbl	*sctbl;
90 	struct {
91 		struct syscall  *table;
92 		struct bpf_map  *map;
93 		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
94 			struct bpf_map  *sys_enter,
95 					*sys_exit;
96 		}		prog_array;
97 		struct {
98 			struct evsel *sys_enter,
99 					  *sys_exit,
100 					  *augmented;
101 		}		events;
102 		struct bpf_program *unaugmented_prog;
103 	} syscalls;
104 	struct {
105 		struct bpf_map *map;
106 	} dump;
107 	struct record_opts	opts;
108 	struct evlist	*evlist;
109 	struct machine		*host;
110 	struct thread		*current;
111 	struct bpf_object	*bpf_obj;
112 	struct cgroup		*cgroup;
113 	u64			base_time;
114 	FILE			*output;
115 	unsigned long		nr_events;
116 	unsigned long		nr_events_printed;
117 	unsigned long		max_events;
118 	struct evswitch		evswitch;
119 	struct strlist		*ev_qualifier;
120 	struct {
121 		size_t		nr;
122 		int		*entries;
123 	}			ev_qualifier_ids;
124 	struct {
125 		size_t		nr;
126 		pid_t		*entries;
127 		struct bpf_map  *map;
128 	}			filter_pids;
129 	double			duration_filter;
130 	double			runtime_ms;
131 	struct {
132 		u64		vfs_getname,
133 				proc_getname;
134 	} stats;
135 	unsigned int		max_stack;
136 	unsigned int		min_stack;
137 	int			raw_augmented_syscalls_args_size;
138 	bool			raw_augmented_syscalls;
139 	bool			fd_path_disabled;
140 	bool			sort_events;
141 	bool			not_ev_qualifier;
142 	bool			live;
143 	bool			full_time;
144 	bool			sched;
145 	bool			multiple_threads;
146 	bool			summary;
147 	bool			summary_only;
148 	bool			failure_only;
149 	bool			show_comm;
150 	bool			print_sample;
151 	bool			show_tool_stats;
152 	bool			trace_syscalls;
153 	bool			kernel_syscallchains;
154 	s16			args_alignment;
155 	bool			show_tstamp;
156 	bool			show_duration;
157 	bool			show_zeros;
158 	bool			show_arg_names;
159 	bool			show_string_prefix;
160 	bool			force;
161 	bool			vfs_getname;
162 	int			trace_pgfaults;
163 	struct {
164 		struct ordered_events	data;
165 		u64			last;
166 	} oe;
167 };
168 
169 struct tp_field {
170 	int offset;
171 	union {
172 		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
173 		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
174 	};
175 };
176 
177 #define TP_UINT_FIELD(bits) \
178 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
179 { \
180 	u##bits value; \
181 	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
182 	return value;  \
183 }
184 
185 TP_UINT_FIELD(8);
186 TP_UINT_FIELD(16);
187 TP_UINT_FIELD(32);
188 TP_UINT_FIELD(64);
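
/*
 * The instantiations above generate tp_field__u8/u16/u32/u64. For reference,
 * TP_UINT_FIELD(32) expands to roughly:
 *
 *   static u64 tp_field__u32(struct tp_field *field, struct perf_sample *sample)
 *   {
 *           u32 value;
 *           memcpy(&value, sample->raw_data + field->offset, sizeof(value));
 *           return value;
 *   }
 *
 * i.e. an alignment-safe read of a raw tracepoint field, widened to u64.
 */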
189 
190 #define TP_UINT_FIELD__SWAPPED(bits) \
191 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
192 { \
193 	u##bits value; \
194 	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
195 	return bswap_##bits(value);\
196 }
197 
198 TP_UINT_FIELD__SWAPPED(16);
199 TP_UINT_FIELD__SWAPPED(32);
200 TP_UINT_FIELD__SWAPPED(64);
201 
202 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
203 {
204 	field->offset = offset;
205 
206 	switch (size) {
207 	case 1:
208 		field->integer = tp_field__u8;
209 		break;
210 	case 2:
211 		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
212 		break;
213 	case 4:
214 		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
215 		break;
216 	case 8:
217 		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
218 		break;
219 	default:
220 		return -1;
221 	}
222 
223 	return 0;
224 }
225 
226 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
227 {
228 	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
229 }
230 
231 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
232 {
233 	return sample->raw_data + field->offset;
234 }
235 
236 static int __tp_field__init_ptr(struct tp_field *field, int offset)
237 {
238 	field->offset = offset;
239 	field->pointer = tp_field__ptr;
240 	return 0;
241 }
242 
243 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
244 {
245 	return __tp_field__init_ptr(field, format_field->offset);
246 }
247 
248 struct syscall_tp {
249 	struct tp_field id;
250 	union {
251 		struct tp_field args, ret;
252 	};
253 };
254 
255 static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
256 					  struct tp_field *field,
257 					  const char *name)
258 {
259 	struct tep_format_field *format_field = perf_evsel__field(evsel, name);
260 
261 	if (format_field == NULL)
262 		return -1;
263 
264 	return tp_field__init_uint(field, format_field, evsel->needs_swap);
265 }
266 
267 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
268 	({ struct syscall_tp *sc = evsel->priv;\
269 	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
270 
271 static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
272 					 struct tp_field *field,
273 					 const char *name)
274 {
275 	struct tep_format_field *format_field = perf_evsel__field(evsel, name);
276 
277 	if (format_field == NULL)
278 		return -1;
279 
280 	return tp_field__init_ptr(field, format_field);
281 }
282 
283 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
284 	({ struct syscall_tp *sc = evsel->priv;\
285 	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
286 
287 static void evsel__delete_priv(struct evsel *evsel)
288 {
289 	zfree(&evsel->priv);
290 	evsel__delete(evsel);
291 }
292 
293 static int perf_evsel__init_syscall_tp(struct evsel *evsel)
294 {
295 	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
296 
297 	if (evsel->priv != NULL) {
298 		if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
299 		    perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
300 			goto out_delete;
301 		return 0;
302 	}
303 
304 	return -ENOMEM;
305 out_delete:
306 	zfree(&evsel->priv);
307 	return -ENOENT;
308 }
309 
310 static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
311 {
312 	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
313 
314 	if (evsel->priv != NULL) {
315 		struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
316 		if (syscall_id == NULL)
317 			syscall_id = perf_evsel__field(tp, "__syscall_nr");
318 		if (syscall_id == NULL)
319 			goto out_delete;
320 		if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
321 			goto out_delete;
322 
323 		return 0;
324 	}
325 
326 	return -ENOMEM;
327 out_delete:
328 	zfree(&evsel->priv);
329 	return -EINVAL;
330 }
331 
332 static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
333 {
334 	struct syscall_tp *sc = evsel->priv;
335 
336 	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
337 }
338 
339 static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
340 {
341 	struct syscall_tp *sc = evsel->priv;
342 
343 	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
344 }
345 
346 static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
347 {
348 	evsel->priv = malloc(sizeof(struct syscall_tp));
349 	if (evsel->priv != NULL) {
350 		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
351 			goto out_delete;
352 
353 		evsel->handler = handler;
354 		return 0;
355 	}
356 
357 	return -ENOMEM;
358 
359 out_delete:
360 	zfree(&evsel->priv);
361 	return -ENOENT;
362 }
363 
364 static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
365 {
366 	struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
367 
368 	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
369 	if (IS_ERR(evsel))
370 		evsel = perf_evsel__newtp("syscalls", direction);
371 
372 	if (IS_ERR(evsel))
373 		return NULL;
374 
375 	if (perf_evsel__init_raw_syscall_tp(evsel, handler))
376 		goto out_delete;
377 
378 	return evsel;
379 
380 out_delete:
381 	evsel__delete_priv(evsel);
382 	return NULL;
383 }
384 
385 #define perf_evsel__sc_tp_uint(evsel, name, sample) \
386 	({ struct syscall_tp *fields = evsel->priv; \
387 	   fields->name.integer(&fields->name, sample); })
388 
389 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
390 	({ struct syscall_tp *fields = evsel->priv; \
391 	   fields->name.pointer(&fields->name, sample); })
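
/*
 * These statement-expression helpers fetch a field via the syscall_tp private
 * data attached to the evsel, e.g. (illustrative):
 *
 *   u64 id     = perf_evsel__sc_tp_uint(evsel, id, sample);
 *   void *args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 *
 * which read the syscall number and the packed argument block from a
 * raw_syscalls:sys_enter sample.
 */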
392 
393 size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
394 {
395 	int idx = val - sa->offset;
396 
397 	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
398 		size_t printed = scnprintf(bf, size, intfmt, val);
399 		if (show_prefix)
400 			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
401 		return printed;
402 	}
403 
404 	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
405 }
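
/*
 * A worked example, using the itimers strarray defined below (entries
 * { "REAL", "VIRTUAL", "PROF" }, prefix "ITIMER_", offset 0):
 *
 *   val = 1, show_prefix = true    ->  "ITIMER_VIRTUAL"
 *   val = 1, show_prefix = false   ->  "VIRTUAL"
 *   val = 7 (out of range)         ->  the raw integer "7", plus an
 *                                      "ITIMER_???" note when show_prefix is set
 */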
406 
407 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
408 						const char *intfmt,
409 					        struct syscall_arg *arg)
410 {
411 	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
412 }
413 
414 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
415 					      struct syscall_arg *arg)
416 {
417 	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
418 }
419 
420 #define SCA_STRARRAY syscall_arg__scnprintf_strarray
421 
422 size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
423 {
424 	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
425 }
426 
427 size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
428 {
429 	size_t printed;
430 	int i;
431 
432 	for (i = 0; i < sas->nr_entries; ++i) {
433 		struct strarray *sa = sas->entries[i];
434 		int idx = val - sa->offset;
435 
436 		if (idx >= 0 && idx < sa->nr_entries) {
437 			if (sa->entries[idx] == NULL)
438 				break;
439 			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
440 		}
441 	}
442 
443 	printed = scnprintf(bf, size, intfmt, val);
444 	if (show_prefix)
445 		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
446 	return printed;
447 }
448 
449 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
450 					struct syscall_arg *arg)
451 {
452 	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
453 }
454 
455 #ifndef AT_FDCWD
456 #define AT_FDCWD	-100
457 #endif
458 
459 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
460 					   struct syscall_arg *arg)
461 {
462 	int fd = arg->val;
463 	const char *prefix = "AT_FD";
464 
465 	if (fd == AT_FDCWD)
466 		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
467 
468 	return syscall_arg__scnprintf_fd(bf, size, arg);
469 }
470 
471 #define SCA_FDAT syscall_arg__scnprintf_fd_at
472 
473 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
474 					      struct syscall_arg *arg);
475 
476 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
477 
478 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
479 {
480 	return scnprintf(bf, size, "%#lx", arg->val);
481 }
482 
483 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
484 {
485 	if (arg->val == 0)
486 		return scnprintf(bf, size, "NULL");
487 	return syscall_arg__scnprintf_hex(bf, size, arg);
488 }
489 
490 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
491 {
492 	return scnprintf(bf, size, "%d", arg->val);
493 }
494 
495 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
496 {
497 	return scnprintf(bf, size, "%ld", arg->val);
498 }
499 
500 static const char *bpf_cmd[] = {
501 	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
502 	"MAP_GET_NEXT_KEY", "PROG_LOAD",
503 };
504 static DEFINE_STRARRAY(bpf_cmd, "BPF_");
505 
506 static const char *fsmount_flags[] = {
507 	[1] = "CLOEXEC",
508 };
509 static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
510 
511 #include "trace/beauty/generated/fsconfig_arrays.c"
512 
513 static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
514 
515 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
516 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
517 
518 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
519 static DEFINE_STRARRAY(itimers, "ITIMER_");
520 
521 static const char *keyctl_options[] = {
522 	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
523 	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
524 	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
525 	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
526 	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
527 };
528 static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
529 
530 static const char *whences[] = { "SET", "CUR", "END",
531 #ifdef SEEK_DATA
532 "DATA",
533 #endif
534 #ifdef SEEK_HOLE
535 "HOLE",
536 #endif
537 };
538 static DEFINE_STRARRAY(whences, "SEEK_");
539 
540 static const char *fcntl_cmds[] = {
541 	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
542 	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
543 	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
544 	"GETOWNER_UIDS",
545 };
546 static DEFINE_STRARRAY(fcntl_cmds, "F_");
547 
548 static const char *fcntl_linux_specific_cmds[] = {
549 	"SETLEASE", "GETLEASE", "NOTIFY", [5] =	"CANCELLK", "DUPFD_CLOEXEC",
550 	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
551 	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
552 };
553 
554 static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
555 
556 static struct strarray *fcntl_cmds_arrays[] = {
557 	&strarray__fcntl_cmds,
558 	&strarray__fcntl_linux_specific_cmds,
559 };
560 
561 static DEFINE_STRARRAYS(fcntl_cmds_arrays);
562 
563 static const char *rlimit_resources[] = {
564 	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
565 	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
566 	"RTTIME",
567 };
568 static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
569 
570 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
571 static DEFINE_STRARRAY(sighow, "SIG_");
572 
573 static const char *clockid[] = {
574 	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
575 	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
576 	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
577 };
578 static DEFINE_STRARRAY(clockid, "CLOCK_");
579 
580 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
581 						 struct syscall_arg *arg)
582 {
583 	bool show_prefix = arg->show_string_prefix;
584 	const char *suffix = "_OK";
585 	size_t printed = 0;
586 	int mode = arg->val;
587 
588 	if (mode == F_OK) /* 0 */
589 		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
590 #define	P_MODE(n) \
591 	if (mode & n##_OK) { \
592 		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
593 		mode &= ~n##_OK; \
594 	}
595 
596 	P_MODE(R);
597 	P_MODE(W);
598 	P_MODE(X);
599 #undef P_MODE
600 
601 	if (mode)
602 		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
603 
604 	return printed;
605 }
606 
607 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
608 
609 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
610 					      struct syscall_arg *arg);
611 
612 #define SCA_FILENAME syscall_arg__scnprintf_filename
613 
614 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
615 						struct syscall_arg *arg)
616 {
617 	bool show_prefix = arg->show_string_prefix;
618 	const char *prefix = "O_";
619 	int printed = 0, flags = arg->val;
620 
621 #define	P_FLAG(n) \
622 	if (flags & O_##n) { \
623 		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
624 		flags &= ~O_##n; \
625 	}
626 
627 	P_FLAG(CLOEXEC);
628 	P_FLAG(NONBLOCK);
629 #undef P_FLAG
630 
631 	if (flags)
632 		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
633 
634 	return printed;
635 }
636 
637 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
638 
639 #ifndef GRND_NONBLOCK
640 #define GRND_NONBLOCK	0x0001
641 #endif
642 #ifndef GRND_RANDOM
643 #define GRND_RANDOM	0x0002
644 #endif
645 
646 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
647 						   struct syscall_arg *arg)
648 {
649 	bool show_prefix = arg->show_string_prefix;
650 	const char *prefix = "GRND_";
651 	int printed = 0, flags = arg->val;
652 
653 #define	P_FLAG(n) \
654 	if (flags & GRND_##n) { \
655 		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
656 		flags &= ~GRND_##n; \
657 	}
658 
659 	P_FLAG(RANDOM);
660 	P_FLAG(NONBLOCK);
661 #undef P_FLAG
662 
663 	if (flags)
664 		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
665 
666 	return printed;
667 }
668 
669 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
670 
671 #define STRARRAY(name, array) \
672 	  { .scnprintf	= SCA_STRARRAY, \
673 	    .parm	= &strarray__##array, }
674 
675 #define STRARRAY_FLAGS(name, array) \
676 	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
677 	    .parm	= &strarray__##array, }
678 
679 #include "trace/beauty/arch_errno_names.c"
680 #include "trace/beauty/eventfd.c"
681 #include "trace/beauty/futex_op.c"
682 #include "trace/beauty/futex_val3.c"
683 #include "trace/beauty/mmap.c"
684 #include "trace/beauty/mode_t.c"
685 #include "trace/beauty/msg_flags.c"
686 #include "trace/beauty/open_flags.c"
687 #include "trace/beauty/perf_event_open.c"
688 #include "trace/beauty/pid.c"
689 #include "trace/beauty/sched_policy.c"
690 #include "trace/beauty/seccomp.c"
691 #include "trace/beauty/signum.c"
692 #include "trace/beauty/socket_type.c"
693 #include "trace/beauty/waitid_options.c"
694 
695 struct syscall_arg_fmt {
696 	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
697 	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
698 	void	   *parm;
699 	const char *name;
700 	bool	   show_zero;
701 };
702 
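/*
 * Per-syscall formatting overrides. Judging by the entries below: 'alias' maps
 * a canonical name to the tracepoint actually present on this arch/kernel
 * (e.g. "stat" -> "newstat"), 'errpid' marks syscalls whose return value is a
 * pid (getpid, wait4, ...), 'timeout' marks syscalls where a zero return means
 * a timeout (poll, select, ...), and 'hexret' asks for a hex-formatted return
 * value (brk, mmap, ... return pointers). The per-arg 'scnprintf' hooks pick a
 * beautifier for individual arguments.
 */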
703 static struct syscall_fmt {
704 	const char *name;
705 	const char *alias;
706 	struct {
707 		const char *sys_enter,
708 			   *sys_exit;
709 	}	   bpf_prog_name;
710 	struct syscall_arg_fmt arg[6];
711 	u8	   nr_args;
712 	bool	   errpid;
713 	bool	   timeout;
714 	bool	   hexret;
715 } syscall_fmts[] = {
716 	{ .name	    = "access",
717 	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
718 	{ .name	    = "arch_prctl",
719 	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
720 		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
721 	{ .name	    = "bind",
722 	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
723 		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
724 		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
725 	{ .name	    = "bpf",
726 	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
727 	{ .name	    = "brk",	    .hexret = true,
728 	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
729 	{ .name     = "clock_gettime",
730 	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
731 	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
732 	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
733 		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
734 		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
735 		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
736 		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
737 	{ .name	    = "close",
738 	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
739 	{ .name	    = "connect",
740 	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
741 		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
742 		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
743 	{ .name	    = "epoll_ctl",
744 	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
745 	{ .name	    = "eventfd2",
746 	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
747 	{ .name	    = "fchmodat",
748 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
749 	{ .name	    = "fchownat",
750 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
751 	{ .name	    = "fcntl",
752 	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
753 			   .parm      = &strarrays__fcntl_cmds_arrays,
754 			   .show_zero = true, },
755 		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
756 	{ .name	    = "flock",
757 	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
758 	{ .name     = "fsconfig",
759 	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
760 	{ .name     = "fsmount",
761 	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
762 		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
763 	{ .name     = "fspick",
764 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
765 		   [1] = { .scnprintf = SCA_FILENAME,	  /* path */ },
766 		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
767 	{ .name	    = "fstat", .alias = "newfstat", },
768 	{ .name	    = "fstatat", .alias = "newfstatat", },
769 	{ .name	    = "futex",
770 	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
771 		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
772 	{ .name	    = "futimesat",
773 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
774 	{ .name	    = "getitimer",
775 	  .arg = { [0] = STRARRAY(which, itimers), }, },
776 	{ .name	    = "getpid",	    .errpid = true, },
777 	{ .name	    = "getpgid",    .errpid = true, },
778 	{ .name	    = "getppid",    .errpid = true, },
779 	{ .name	    = "getrandom",
780 	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
781 	{ .name	    = "getrlimit",
782 	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
783 	{ .name	    = "gettid",	    .errpid = true, },
784 	{ .name	    = "ioctl",
785 	  .arg = {
786 #if defined(__i386__) || defined(__x86_64__)
787 /*
788  * FIXME: Make this available to all arches.
789  */
790 		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
791 		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
792 #else
793 		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
794 #endif
795 	{ .name	    = "kcmp",	    .nr_args = 5,
796 	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
797 		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
798 		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
799 		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
800 		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
801 	{ .name	    = "keyctl",
802 	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
803 	{ .name	    = "kill",
804 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
805 	{ .name	    = "linkat",
806 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
807 	{ .name	    = "lseek",
808 	  .arg = { [2] = STRARRAY(whence, whences), }, },
809 	{ .name	    = "lstat", .alias = "newlstat", },
810 	{ .name     = "madvise",
811 	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
812 		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
813 	{ .name	    = "mkdirat",
814 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
815 	{ .name	    = "mknodat",
816 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
817 	{ .name	    = "mmap",	    .hexret = true,
818 /* The standard mmap maps to old_mmap on s390x */
819 #if defined(__s390x__)
820 	.alias = "old_mmap",
821 #endif
822 	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
823 		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */ },
824 		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
825 	{ .name	    = "mount",
826 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
827 		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
828 			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
829 	{ .name	    = "move_mount",
830 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
831 		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
832 		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
833 		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
834 		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
835 	{ .name	    = "mprotect",
836 	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
837 		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
838 	{ .name	    = "mq_unlink",
839 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
840 	{ .name	    = "mremap",	    .hexret = true,
841 	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
842 	{ .name	    = "name_to_handle_at",
843 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
844 	{ .name	    = "newfstatat",
845 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
846 	{ .name	    = "open",
847 	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
848 	{ .name	    = "open_by_handle_at",
849 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
850 		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
851 	{ .name	    = "openat",
852 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
853 		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
854 	{ .name	    = "perf_event_open",
855 	  .arg = { [2] = { .scnprintf = SCA_INT,	/* cpu */ },
856 		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
857 		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
858 	{ .name	    = "pipe2",
859 	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
860 	{ .name	    = "pkey_alloc",
861 	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
862 	{ .name	    = "pkey_free",
863 	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
864 	{ .name	    = "pkey_mprotect",
865 	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
866 		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
867 		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
868 	{ .name	    = "poll", .timeout = true, },
869 	{ .name	    = "ppoll", .timeout = true, },
870 	{ .name	    = "prctl",
871 	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
872 		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
873 		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
874 	{ .name	    = "pread", .alias = "pread64", },
875 	{ .name	    = "preadv", .alias = "pread", },
876 	{ .name	    = "prlimit64",
877 	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
878 	{ .name	    = "pwrite", .alias = "pwrite64", },
879 	{ .name	    = "readlinkat",
880 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
881 	{ .name	    = "recvfrom",
882 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
883 	{ .name	    = "recvmmsg",
884 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
885 	{ .name	    = "recvmsg",
886 	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
887 	{ .name	    = "renameat",
888 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
889 		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
890 	{ .name	    = "renameat2",
891 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
892 		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
893 		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
894 	{ .name	    = "rt_sigaction",
895 	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
896 	{ .name	    = "rt_sigprocmask",
897 	  .arg = { [0] = STRARRAY(how, sighow), }, },
898 	{ .name	    = "rt_sigqueueinfo",
899 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
900 	{ .name	    = "rt_tgsigqueueinfo",
901 	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
902 	{ .name	    = "sched_setscheduler",
903 	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
904 	{ .name	    = "seccomp",
905 	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
906 		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
907 	{ .name	    = "select", .timeout = true, },
908 	{ .name	    = "sendfile", .alias = "sendfile64", },
909 	{ .name	    = "sendmmsg",
910 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
911 	{ .name	    = "sendmsg",
912 	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
913 	{ .name	    = "sendto",
914 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
915 		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
916 	{ .name	    = "set_tid_address", .errpid = true, },
917 	{ .name	    = "setitimer",
918 	  .arg = { [0] = STRARRAY(which, itimers), }, },
919 	{ .name	    = "setrlimit",
920 	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
921 	{ .name	    = "socket",
922 	  .arg = { [0] = STRARRAY(family, socket_families),
923 		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
924 		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
925 	{ .name	    = "socketpair",
926 	  .arg = { [0] = STRARRAY(family, socket_families),
927 		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
928 		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
929 	{ .name	    = "stat", .alias = "newstat", },
930 	{ .name	    = "statx",
931 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
932 		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
933 		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
934 	{ .name	    = "swapoff",
935 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
936 	{ .name	    = "swapon",
937 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
938 	{ .name	    = "symlinkat",
939 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
940 	{ .name	    = "sync_file_range",
941 	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
942 	{ .name	    = "tgkill",
943 	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
944 	{ .name	    = "tkill",
945 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
946 	{ .name     = "umount2", .alias = "umount",
947 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
948 	{ .name	    = "uname", .alias = "newuname", },
949 	{ .name	    = "unlinkat",
950 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
951 	{ .name	    = "utimensat",
952 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
953 	{ .name	    = "wait4",	    .errpid = true,
954 	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
955 	{ .name	    = "waitid",	    .errpid = true,
956 	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
957 };
958 
959 static int syscall_fmt__cmp(const void *name, const void *fmtp)
960 {
961 	const struct syscall_fmt *fmt = fmtp;
962 	return strcmp(name, fmt->name);
963 }
964 
965 static struct syscall_fmt *syscall_fmt__find(const char *name)
966 {
967 	const int nmemb = ARRAY_SIZE(syscall_fmts);
968 	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
969 }
970 
971 static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
972 {
973 	int i, nmemb = ARRAY_SIZE(syscall_fmts);
974 
975 	for (i = 0; i < nmemb; ++i) {
976 		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
977 			return &syscall_fmts[i];
978 	}
979 
980 	return NULL;
981 }
982 
983 /*
984  * is_exit: is this "exit" or "exit_group"?
985  * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
986  * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
987  * nonexistent: Just a hole in the syscall table, syscall id not allocated
988  */
989 struct syscall {
990 	struct tep_event    *tp_format;
991 	int		    nr_args;
992 	int		    args_size;
993 	struct {
994 		struct bpf_program *sys_enter,
995 				   *sys_exit;
996 	}		    bpf_prog;
997 	bool		    is_exit;
998 	bool		    is_open;
999 	bool		    nonexistent;
1000 	struct tep_format_field *args;
1001 	const char	    *name;
1002 	struct syscall_fmt  *fmt;
1003 	struct syscall_arg_fmt *arg_fmt;
1004 };
1005 
1006 /*
1007  * Must match what is in the BPF program:
1008  *
1009  * tools/perf/examples/bpf/augmented_raw_syscalls.c
1010  */
1011 struct bpf_map_syscall_entry {
1012 	bool	enabled;
1013 	u16	string_args_len[6];
1014 };
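
/*
 * The struct above is the value type of the BPF syscalls map, so its size,
 * field order and padding have to match the BPF side byte for byte: a mismatch
 * can make the bpf_map_update_elem() calls fail or scramble the per-syscall
 * enable/string-length info read by the BPF program.
 */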
1015 
1016 /*
1017  * We need this 'calculated' boolean because in some cases we really don't
1018  * know the duration of a syscall, for instance, when we start a session and
1019  * some threads are already waiting for a syscall to finish, say 'poll', in
1020  * which case all we can do is print "( ? )" for the duration and for the
1021  * start timestamp.
1022  */
1023 static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1024 {
1025 	double duration = (double)t / NSEC_PER_MSEC;
1026 	size_t printed = fprintf(fp, "(");
1027 
1028 	if (!calculated)
1029 		printed += fprintf(fp, "         ");
1030 	else if (duration >= 1.0)
1031 		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1032 	else if (duration >= 0.01)
1033 		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1034 	else
1035 		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1036 	return printed + fprintf(fp, "): ");
1037 }
1038 
1039 /**
1040  * filename.ptr: The filename char pointer that will be vfs_getname'd
1041  * filename.entry_str_pos: Where to insert the string translated from
1042  *                         filename.ptr by the vfs_getname tracepoint/kprobe.
1043  * ret_scnprintf: syscall args may set this to a different syscall return
1044  *                formatter, for instance, fcntl may return fds, file flags, etc.
1045  */
1046 struct thread_trace {
1047 	u64		  entry_time;
1048 	bool		  entry_pending;
1049 	unsigned long	  nr_events;
1050 	unsigned long	  pfmaj, pfmin;
1051 	char		  *entry_str;
1052 	double		  runtime_ms;
1053 	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1054 	struct {
1055 		unsigned long ptr;
1056 		short int     entry_str_pos;
1057 		bool	      pending_open;
1058 		unsigned int  namelen;
1059 		char	      *name;
1060 	} filename;
1061 	struct {
1062 		int	      max;
1063 		struct file   *table;
1064 	} files;
1065 
1066 	struct intlist *syscall_stats;
1067 };
1068 
1069 static struct thread_trace *thread_trace__new(void)
1070 {
1071 	struct thread_trace *ttrace =  zalloc(sizeof(struct thread_trace));
1072 
1073 	if (ttrace) {
1074 		ttrace->files.max = -1;
1075 		ttrace->syscall_stats = intlist__new(NULL);
1076 	}
1077 
1078 	return ttrace;
1079 }
1080 
1081 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1082 {
1083 	struct thread_trace *ttrace;
1084 
1085 	if (thread == NULL)
1086 		goto fail;
1087 
1088 	if (thread__priv(thread) == NULL)
1089 		thread__set_priv(thread, thread_trace__new());
1090 
1091 	if (thread__priv(thread) == NULL)
1092 		goto fail;
1093 
1094 	ttrace = thread__priv(thread);
1095 	++ttrace->nr_events;
1096 
1097 	return ttrace;
1098 fail:
1099 	color_fprintf(fp, PERF_COLOR_RED,
1100 		      "WARNING: not enough memory, dropping samples!\n");
1101 	return NULL;
1102 }
1103 
1104 
1105 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1106 				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1107 {
1108 	struct thread_trace *ttrace = thread__priv(arg->thread);
1109 
1110 	ttrace->ret_scnprintf = ret_scnprintf;
1111 }
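
/*
 * Illustrative use: an argument beautifier that knows the return value type
 * depends on its argument can install a return formatter from here, e.g. a
 * hypothetical fcntl 'cmd' beautifier doing
 *
 *   if (cmd == F_DUPFD)
 *           syscall_arg__set_ret_scnprintf(arg, syscall_arg__scnprintf_fd);
 *
 * so that the fd returned by fcntl(fd, F_DUPFD, ...) gets rendered with its path.
 */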
1112 
1113 #define TRACE_PFMAJ		(1 << 0)
1114 #define TRACE_PFMIN		(1 << 1)
1115 
1116 static const size_t trace__entry_str_size = 2048;
1117 
1118 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
1119 {
1120 	if (fd < 0)
1121 		return NULL;
1122 
1123 	if (fd > ttrace->files.max) {
1124 		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
1125 
1126 		if (nfiles == NULL)
1127 			return NULL;
1128 
1129 		if (ttrace->files.max != -1) {
1130 			memset(nfiles + ttrace->files.max + 1, 0,
1131 			       (fd - ttrace->files.max) * sizeof(struct file));
1132 		} else {
1133 			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
1134 		}
1135 
1136 		ttrace->files.table = nfiles;
1137 		ttrace->files.max   = fd;
1138 	}
1139 
1140 	return ttrace->files.table + fd;
1141 }
1142 
1143 struct file *thread__files_entry(struct thread *thread, int fd)
1144 {
1145 	return thread_trace__files_entry(thread__priv(thread), fd);
1146 }
1147 
1148 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1149 {
1150 	struct thread_trace *ttrace = thread__priv(thread);
1151 	struct file *file = thread_trace__files_entry(ttrace, fd);
1152 
1153 	if (file != NULL) {
1154 		struct stat st;
1155 		if (stat(pathname, &st) == 0)
1156 			file->dev_maj = major(st.st_rdev);
1157 		file->pathname = strdup(pathname);
1158 		if (file->pathname)
1159 			return 0;
1160 	}
1161 
1162 	return -1;
1163 }
1164 
1165 static int thread__read_fd_path(struct thread *thread, int fd)
1166 {
1167 	char linkname[PATH_MAX], pathname[PATH_MAX];
1168 	struct stat st;
1169 	int ret;
1170 
1171 	if (thread->pid_ == thread->tid) {
1172 		scnprintf(linkname, sizeof(linkname),
1173 			  "/proc/%d/fd/%d", thread->pid_, fd);
1174 	} else {
1175 		scnprintf(linkname, sizeof(linkname),
1176 			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1177 	}
1178 
1179 	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1180 		return -1;
1181 
1182 	ret = readlink(linkname, pathname, sizeof(pathname));
1183 
1184 	if (ret < 0 || ret > st.st_size)
1185 		return -1;
1186 
1187 	pathname[ret] = '\0';
1188 	return trace__set_fd_pathname(thread, fd, pathname);
1189 }
1190 
1191 static const char *thread__fd_path(struct thread *thread, int fd,
1192 				   struct trace *trace)
1193 {
1194 	struct thread_trace *ttrace = thread__priv(thread);
1195 
1196 	if (ttrace == NULL || trace->fd_path_disabled)
1197 		return NULL;
1198 
1199 	if (fd < 0)
1200 		return NULL;
1201 
1202 	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
1203 		if (!trace->live)
1204 			return NULL;
1205 		++trace->stats.proc_getname;
1206 		if (thread__read_fd_path(thread, fd))
1207 			return NULL;
1208 	}
1209 
1210 	return ttrace->files.table[fd].pathname;
1211 }
1212 
1213 size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1214 {
1215 	int fd = arg->val;
1216 	size_t printed = scnprintf(bf, size, "%d", fd);
1217 	const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1218 
1219 	if (path)
1220 		printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1221 
1222 	return printed;
1223 }
1224 
1225 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1226 {
1227 	size_t printed = scnprintf(bf, size, "%d", fd);
1228 	struct thread *thread = machine__find_thread(trace->host, pid, pid);
1229 
1230 	if (thread) {
1231 		const char *path = thread__fd_path(thread, fd, trace);
1232 
1233 		if (path)
1234 			printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1235 
1236 		thread__put(thread);
1237 	}
1238 
1239 	return printed;
1240 }
1241 
1242 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1243 					      struct syscall_arg *arg)
1244 {
1245 	int fd = arg->val;
1246 	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1247 	struct thread_trace *ttrace = thread__priv(arg->thread);
1248 
1249 	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1250 		zfree(&ttrace->files.table[fd].pathname);
1251 
1252 	return printed;
1253 }
1254 
1255 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1256 				     unsigned long ptr)
1257 {
1258 	struct thread_trace *ttrace = thread__priv(thread);
1259 
1260 	ttrace->filename.ptr = ptr;
1261 	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1262 }
1263 
1264 static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1265 {
1266 	struct augmented_arg *augmented_arg = arg->augmented.args;
1267 	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1268 	/*
1269 	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1270 	 * we would have two strings, each prefixed by its size.
1271 	 */
1272 	int consumed = sizeof(*augmented_arg) + augmented_arg->size;
1273 
1274 	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1275 	arg->augmented.size -= consumed;
1276 
1277 	return printed;
1278 }
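
/*
 * In other words, the augmented payload is a sequence of
 * (struct augmented_arg header, header->size bytes of string data) records,
 * one per string argument; e.g. for renameat() two such records follow the raw
 * syscall args: the old and the new pathname. Consuming
 * sizeof(*augmented_arg) + size above is what steps to the next record.
 */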
1279 
1280 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1281 					      struct syscall_arg *arg)
1282 {
1283 	unsigned long ptr = arg->val;
1284 
1285 	if (arg->augmented.args)
1286 		return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1287 
1288 	if (!arg->trace->vfs_getname)
1289 		return scnprintf(bf, size, "%#x", ptr);
1290 
1291 	thread__set_filename_pos(arg->thread, bf, ptr);
1292 	return 0;
1293 }
1294 
1295 static bool trace__filter_duration(struct trace *trace, double t)
1296 {
1297 	return t < (trace->duration_filter * NSEC_PER_MSEC);
1298 }
1299 
1300 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1301 {
1302 	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1303 
1304 	return fprintf(fp, "%10.3f ", ts);
1305 }
1306 
1307 /*
1308  * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
1309  * using ttrace->entry_time for a thread that receives a sys_exit without
1310  * first having received a sys_enter ("poll" issued before tracing session
1311  * starts, or the sys_enter lost due to ring buffer overflow).
1312  */
1313 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1314 {
1315 	if (tstamp > 0)
1316 		return __trace__fprintf_tstamp(trace, tstamp, fp);
1317 
1318 	return fprintf(fp, "         ? ");
1319 }
1320 
1321 static bool done = false;
1322 static bool interrupted = false;
1323 
1324 static void sig_handler(int sig)
1325 {
1326 	done = true;
1327 	interrupted = sig == SIGINT;
1328 }
1329 
1330 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1331 {
1332 	size_t printed = 0;
1333 
1334 	if (trace->multiple_threads) {
1335 		if (trace->show_comm)
1336 			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1337 		printed += fprintf(fp, "%d ", thread->tid);
1338 	}
1339 
1340 	return printed;
1341 }
1342 
1343 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1344 					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1345 {
1346 	size_t printed = 0;
1347 
1348 	if (trace->show_tstamp)
1349 		printed = trace__fprintf_tstamp(trace, tstamp, fp);
1350 	if (trace->show_duration)
1351 		printed += fprintf_duration(duration, duration_calculated, fp);
1352 	return printed + trace__fprintf_comm_tid(trace, thread, fp);
1353 }
1354 
1355 static int trace__process_event(struct trace *trace, struct machine *machine,
1356 				union perf_event *event, struct perf_sample *sample)
1357 {
1358 	int ret = 0;
1359 
1360 	switch (event->header.type) {
1361 	case PERF_RECORD_LOST:
1362 		color_fprintf(trace->output, PERF_COLOR_RED,
1363 			      "LOST %" PRIu64 " events!\n", event->lost.lost);
1364 		ret = machine__process_lost_event(machine, event, sample);
1365 		break;
1366 	default:
1367 		ret = machine__process_event(machine, event, sample);
1368 		break;
1369 	}
1370 
1371 	return ret;
1372 }
1373 
1374 static int trace__tool_process(struct perf_tool *tool,
1375 			       union perf_event *event,
1376 			       struct perf_sample *sample,
1377 			       struct machine *machine)
1378 {
1379 	struct trace *trace = container_of(tool, struct trace, tool);
1380 	return trace__process_event(trace, machine, event, sample);
1381 }
1382 
1383 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1384 {
1385 	struct machine *machine = vmachine;
1386 
1387 	if (machine->kptr_restrict_warned)
1388 		return NULL;
1389 
1390 	if (symbol_conf.kptr_restrict) {
1391 		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1392 			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1393 			   "Kernel samples will not be resolved.\n");
1394 		machine->kptr_restrict_warned = true;
1395 		return NULL;
1396 	}
1397 
1398 	return machine__resolve_kernel_addr(vmachine, addrp, modp);
1399 }
1400 
1401 static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1402 {
1403 	int err = symbol__init(NULL);
1404 
1405 	if (err)
1406 		return err;
1407 
1408 	trace->host = machine__new_host();
1409 	if (trace->host == NULL)
1410 		return -ENOMEM;
1411 
1412 	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1413 	if (err < 0)
1414 		goto out;
1415 
1416 	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1417 					    evlist->core.threads, trace__tool_process, false,
1418 					    1);
1419 out:
1420 	if (err)
1421 		symbol__exit();
1422 
1423 	return err;
1424 }
1425 
1426 static void trace__symbols__exit(struct trace *trace)
1427 {
1428 	machine__exit(trace->host);
1429 	trace->host = NULL;
1430 
1431 	symbol__exit();
1432 }
1433 
1434 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1435 {
1436 	int idx;
1437 
1438 	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
1439 		nr_args = sc->fmt->nr_args;
1440 
1441 	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1442 	if (sc->arg_fmt == NULL)
1443 		return -1;
1444 
1445 	for (idx = 0; idx < nr_args; ++idx) {
1446 		if (sc->fmt)
1447 			sc->arg_fmt[idx] = sc->fmt->arg[idx];
1448 	}
1449 
1450 	sc->nr_args = nr_args;
1451 	return 0;
1452 }
1453 
1454 static int syscall__set_arg_fmts(struct syscall *sc)
1455 {
1456 	struct tep_format_field *field, *last_field = NULL;
1457 	int idx = 0, len;
1458 
1459 	for (field = sc->args; field; field = field->next, ++idx) {
1460 		last_field = field;
1461 
1462 		if (sc->fmt && sc->fmt->arg[idx].scnprintf)
1463 			continue;
1464 
1465 		len = strlen(field->name);
1466 
1467 		if (strcmp(field->type, "const char *") == 0 &&
1468 		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
1469 		     strstr(field->name, "path") != NULL))
1470 			sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
1471 		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
1472 			sc->arg_fmt[idx].scnprintf = SCA_PTR;
1473 		else if (strcmp(field->type, "pid_t") == 0)
1474 			sc->arg_fmt[idx].scnprintf = SCA_PID;
1475 		else if (strcmp(field->type, "umode_t") == 0)
1476 			sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
1477 		else if ((strcmp(field->type, "int") == 0 ||
1478 			  strcmp(field->type, "unsigned int") == 0 ||
1479 			  strcmp(field->type, "long") == 0) &&
1480 			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
1481 			/*
1482 			 * /sys/kernel/tracing/events/syscalls/sys_enter*
1483 			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1484 			 * 65 int
1485 			 * 23 unsigned int
1486 			 * 7 unsigned long
1487 			 */
1488 			sc->arg_fmt[idx].scnprintf = SCA_FD;
1489 		}
1490 	}
1491 
1492 	if (last_field)
1493 		sc->args_size = last_field->offset + last_field->size;
1494 
1495 	return 0;
1496 }
1497 
1498 static int trace__read_syscall_info(struct trace *trace, int id)
1499 {
1500 	char tp_name[128];
1501 	struct syscall *sc;
1502 	const char *name = syscalltbl__name(trace->sctbl, id);
1503 
1504 	if (trace->syscalls.table == NULL) {
1505 		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1506 		if (trace->syscalls.table == NULL)
1507 			return -ENOMEM;
1508 	}
1509 
1510 	sc = trace->syscalls.table + id;
1511 	if (sc->nonexistent)
1512 		return 0;
1513 
1514 	if (name == NULL) {
1515 		sc->nonexistent = true;
1516 		return 0;
1517 	}
1518 
1519 	sc->name = name;
1520 	sc->fmt  = syscall_fmt__find(sc->name);
1521 
1522 	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1523 	sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1524 
1525 	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1526 		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1527 		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1528 	}
1529 
1530 	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
1531 		return -ENOMEM;
1532 
1533 	if (IS_ERR(sc->tp_format))
1534 		return PTR_ERR(sc->tp_format);
1535 
1536 	sc->args = sc->tp_format->format.fields;
1537 	/*
1538 	 * We need to check for and discard the first field, '__syscall_nr' (or
1539 	 * 'nr' on older kernels, where '__syscall_nr' does not exist): it holds
1540 	 * the syscall number, which we already have, so it is needless here.
1541 	 */
1542 	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1543 		sc->args = sc->args->next;
1544 		--sc->nr_args;
1545 	}
1546 
1547 	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1548 	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1549 
1550 	return syscall__set_arg_fmts(sc);
1551 }
1552 
1553 static int intcmp(const void *a, const void *b)
1554 {
1555 	const int *one = a, *another = b;
1556 
1557 	return *one - *another;
1558 }
1559 
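/*
 * Resolve the syscall names given as an event qualifier (perf trace -e) into
 * syscall ids. Plain names are looked up directly; names that don't match are
 * tried as globs via strglobmatch, so e.g. (illustrative) -e 'epoll*' pulls in
 * epoll_create, epoll_ctl, epoll_wait, etc. The resulting id list is sorted so
 * that trace__syscall_enabled() can bsearch() it.
 */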
1560 static int trace__validate_ev_qualifier(struct trace *trace)
1561 {
1562 	int err = 0;
1563 	bool printed_invalid_prefix = false;
1564 	struct str_node *pos;
1565 	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1566 
1567 	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1568 						 sizeof(trace->ev_qualifier_ids.entries[0]));
1569 
1570 	if (trace->ev_qualifier_ids.entries == NULL) {
1571 		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1572 		       trace->output);
1573 		err = -EINVAL;
1574 		goto out;
1575 	}
1576 
1577 	strlist__for_each_entry(pos, trace->ev_qualifier) {
1578 		const char *sc = pos->s;
1579 		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1580 
1581 		if (id < 0) {
1582 			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1583 			if (id >= 0)
1584 				goto matches;
1585 
1586 			if (!printed_invalid_prefix) {
1587 				pr_debug("Skipping unknown syscalls: ");
1588 				printed_invalid_prefix = true;
1589 			} else {
1590 				pr_debug(", ");
1591 			}
1592 
1593 			pr_debug("%s", sc);
1594 			continue;
1595 		}
1596 matches:
1597 		trace->ev_qualifier_ids.entries[nr_used++] = id;
1598 		if (match_next == -1)
1599 			continue;
1600 
1601 		while (1) {
1602 			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1603 			if (id < 0)
1604 				break;
1605 			if (nr_allocated == nr_used) {
1606 				void *entries;
1607 
1608 				nr_allocated += 8;
1609 				entries = realloc(trace->ev_qualifier_ids.entries,
1610 						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1611 				if (entries == NULL) {
1612 					err = -ENOMEM;
1613 					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1614 					goto out_free;
1615 				}
1616 				trace->ev_qualifier_ids.entries = entries;
1617 			}
1618 			trace->ev_qualifier_ids.entries[nr_used++] = id;
1619 		}
1620 	}
1621 
1622 	trace->ev_qualifier_ids.nr = nr_used;
1623 	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1624 out:
1625 	if (printed_invalid_prefix)
1626 		pr_debug("\n");
1627 	return err;
1628 out_free:
1629 	zfree(&trace->ev_qualifier_ids.entries);
1630 	trace->ev_qualifier_ids.nr = 0;
1631 	goto out;
1632 }
1633 
1634 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1635 {
1636 	bool in_ev_qualifier;
1637 
1638 	if (trace->ev_qualifier_ids.nr == 0)
1639 		return true;
1640 
1641 	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1642 				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1643 
1644 	if (in_ev_qualifier)
1645 	       return !trace->not_ev_qualifier;
1646 
1647 	return trace->not_ev_qualifier;
1648 }
1649 
1650 /*
1651  * args is to be interpreted as a series of longs but we need to handle
1652  * 8-byte unaligned accesses. args points to raw_data within the event
1653  * and raw_data is guaranteed to be 8-byte unaligned because it is
1654  * preceded by raw_size which is a u32. So we need to copy args to a temp
1655  * variable to read it. Most notably this avoids extended load instructions
1656  * on unaligned addresses
1657  */
1658 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1659 {
1660 	unsigned long val;
1661 	unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1662 
1663 	memcpy(&val, p, sizeof(val));
1664 	return val;
1665 }
1666 
1667 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
1668 				      struct syscall_arg *arg)
1669 {
1670 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
1671 		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
1672 
1673 	return scnprintf(bf, size, "arg%d: ", arg->idx);
1674 }
1675 
1676 /*
1677  * Check if the value is in fact zero, i.e. mask whatever needs masking, such
1678  * as the mount 'flags' argument, which needs to ignore a magic flag, see the
1679  * comment in tools/perf/trace/beauty/mount_flags.c
1680  */
1681 static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
1682 {
1683 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
1684 		return sc->arg_fmt[arg->idx].mask_val(arg, val);
1685 
1686 	return val;
1687 }
1688 
1689 static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
1690 				     struct syscall_arg *arg, unsigned long val)
1691 {
1692 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
1693 		arg->val = val;
1694 		if (sc->arg_fmt[arg->idx].parm)
1695 			arg->parm = sc->arg_fmt[arg->idx].parm;
1696 		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
1697 	}
1698 	return scnprintf(bf, size, "%ld", val);
1699 }
1700 
1701 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1702 				      unsigned char *args, void *augmented_args, int augmented_args_size,
1703 				      struct trace *trace, struct thread *thread)
1704 {
1705 	size_t printed = 0;
1706 	unsigned long val;
1707 	u8 bit = 1;
1708 	struct syscall_arg arg = {
1709 		.args	= args,
1710 		.augmented = {
1711 			.size = augmented_args_size,
1712 			.args = augmented_args,
1713 		},
1714 		.idx	= 0,
1715 		.mask	= 0,
1716 		.trace  = trace,
1717 		.thread = thread,
1718 		.show_string_prefix = trace->show_string_prefix,
1719 	};
1720 	struct thread_trace *ttrace = thread__priv(thread);
1721 
1722 	/*
1723 	 * A syscall like fcntl will set this in its 'cmd' formatter to pick the
1724 	 * right formatter for the return value (an fd? file flags?), which is
1725 	 * not needed for syscalls that always return a given type, say an fd.
1726 	 */
1727 	ttrace->ret_scnprintf = NULL;
1728 
1729 	if (sc->args != NULL) {
1730 		struct tep_format_field *field;
1731 
1732 		for (field = sc->args; field;
1733 		     field = field->next, ++arg.idx, bit <<= 1) {
1734 			if (arg.mask & bit)
1735 				continue;
1736 
1737 			val = syscall_arg__val(&arg, arg.idx);
1738 			/*
1739 			 * Some syscall args need some mask, most don't and
1740 			 * return val untouched.
1741 			 */
1742 			val = syscall__mask_val(sc, &arg, val);
1743 
1744 			/*
1745 			 * Suppress this argument if its value is zero and
1746 			 * we don't have a string associated in a
1747 			 * strarray for it.
1748 			 */
1749 			if (val == 0 &&
1750 			    !trace->show_zeros &&
1751 			    !(sc->arg_fmt &&
1752 			      (sc->arg_fmt[arg.idx].show_zero ||
1753 			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
1754 			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
1755 			      sc->arg_fmt[arg.idx].parm))
1756 				continue;
1757 
1758 			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
1759 
1760 			if (trace->show_arg_names)
1761 				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
1762 
1763 			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1764 		}
1765 	} else if (IS_ERR(sc->tp_format)) {
1766 		/*
1767 		 * If we managed to read the tracepoint /format file, then we
1768 		 * may end up not having any args, like with gettid(), so only
1769 		 * print the raw args when we didn't manage to read it.
1770 		 */
1771 		while (arg.idx < sc->nr_args) {
1772 			if (arg.mask & bit)
1773 				goto next_arg;
1774 			val = syscall_arg__val(&arg, arg.idx);
1775 			if (printed)
1776 				printed += scnprintf(bf + printed, size - printed, ", ");
1777 			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
1778 			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1779 next_arg:
1780 			++arg.idx;
1781 			bit <<= 1;
1782 		}
1783 	}
1784 
1785 	return printed;
1786 }
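
/*
 * The buffer filled above ends up looking roughly like this, illustrative
 * only, the exact output depends on the beautifiers and on augmentation:
 *
 *	dfd: CWD, filename: "/etc/passwd", flags: RDONLY|CLOEXEC
 */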
1787 
1788 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
1789 				  union perf_event *event,
1790 				  struct perf_sample *sample);
1791 
1792 static struct syscall *trace__syscall_info(struct trace *trace,
1793 					   struct evsel *evsel, int id)
1794 {
1795 	int err = 0;
1796 
1797 	if (id < 0) {
1798 
1799 		/*
1800 		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1801 		 * before that, leaving at a higher verbosity level till that is
1802 		 * explained. Reproduced with plain ftrace with:
1803 		 *
1804 		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1805 		 * grep "NR -1 " /t/trace_pipe
1806 		 *
1807 		 * After generating some load on the machine.
1808  		 */
1809 		if (verbose > 1) {
1810 			static u64 n;
1811 			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
1812 				id, perf_evsel__name(evsel), ++n);
1813 		}
1814 		return NULL;
1815 	}
1816 
1817 	err = -EINVAL;
1818 
1819 	if (id > trace->sctbl->syscalls.max_id)
1820 		goto out_cant_read;
1821 
1822 	if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
1823 	    (err = trace__read_syscall_info(trace, id)) != 0)
1824 		goto out_cant_read;
1825 
1826 	if (trace->syscalls.table[id].name == NULL) {
1827 		if (trace->syscalls.table[id].nonexistent)
1828 			return NULL;
1829 		goto out_cant_read;
1830 	}
1831 
1832 	return &trace->syscalls.table[id];
1833 
1834 out_cant_read:
1835 	if (verbose > 0) {
1836 		char sbuf[STRERR_BUFSIZE];
1837 		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
1838 		if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
1839 			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
1840 		fputs(" information\n", trace->output);
1841 	}
1842 	return NULL;
1843 }
1844 
1845 static void thread__update_stats(struct thread_trace *ttrace,
1846 				 int id, struct perf_sample *sample)
1847 {
1848 	struct int_node *inode;
1849 	struct stats *stats;
1850 	u64 duration = 0;
1851 
1852 	inode = intlist__findnew(ttrace->syscall_stats, id);
1853 	if (inode == NULL)
1854 		return;
1855 
1856 	stats = inode->priv;
1857 	if (stats == NULL) {
1858 		stats = malloc(sizeof(struct stats));
1859 		if (stats == NULL)
1860 			return;
1861 		init_stats(stats);
1862 		inode->priv = stats;
1863 	}
1864 
1865 	if (ttrace->entry_time && sample->time > ttrace->entry_time)
1866 		duration = sample->time - ttrace->entry_time;
1867 
1868 	update_stats(stats, duration);
1869 }
1870 
1871 static int trace__printf_interrupted_entry(struct trace *trace)
1872 {
1873 	struct thread_trace *ttrace;
1874 	size_t printed;
1875 	int len;
1876 
1877 	if (trace->failure_only || trace->current == NULL)
1878 		return 0;
1879 
1880 	ttrace = thread__priv(trace->current);
1881 
1882 	if (!ttrace->entry_pending)
1883 		return 0;
1884 
1885 	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
1886 	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
1887 
1888 	if (len < trace->args_alignment - 4)
1889 		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
1890 
1891 	printed += fprintf(trace->output, " ...\n");
1892 
1893 	ttrace->entry_pending = false;
1894 	++trace->nr_events_printed;
1895 
1896 	return printed;
1897 }
1898 
1899 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
1900 				 struct perf_sample *sample, struct thread *thread)
1901 {
1902 	int printed = 0;
1903 
1904 	if (trace->print_sample) {
1905 		double ts = (double)sample->time / NSEC_PER_MSEC;
1906 
1907 		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
1908 				   perf_evsel__name(evsel), ts,
1909 				   thread__comm_str(thread),
1910 				   sample->pid, sample->tid, sample->cpu);
1911 	}
1912 
1913 	return printed;
1914 }
1915 
1916 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
1917 {
1918 	void *augmented_args = NULL;
1919 	/*
1920 	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
1921 	 * and there we get all 6 syscall args plus the tracepoint common fields
1922 	 * that get calculated at the start and the syscall_nr (another long).
1923 	 * So we check if that is the case and if so don't look past
1924 	 * sc->args_size but always past the full raw_syscalls:sys_enter payload,
1925 	 * which is fixed.
1926 	 *
1927 	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
1928 	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
1929 	 * copies only what we need for each syscall, like what happens when we
1930 	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
1931 	 * traffic to just what is needed for each syscall.
1932 	 */
1933 	int args_size = raw_augmented_args_size ?: sc->args_size;
1934 
1935 	*augmented_args_size = sample->raw_size - args_size;
1936 	if (*augmented_args_size > 0)
1937 		augmented_args = sample->raw_data + args_size;
1938 
1939 	return augmented_args;
1940 }
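
/*
 * Sketch of the raw_syscalls:sys_enter payload layout assumed above when
 * raw_augmented_args_size is used, the actual offsets come from the
 * tracefs format file:
 *
 *	| common fields | long id | unsigned long args[6] | augmented args... |
 *	'-------------------- args_size --------------------'
 */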
1941 
1942 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
1943 			    union perf_event *event __maybe_unused,
1944 			    struct perf_sample *sample)
1945 {
1946 	char *msg;
1947 	void *args;
1948 	int printed = 0;
1949 	struct thread *thread;
1950 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
1951 	int augmented_args_size = 0;
1952 	void *augmented_args = NULL;
1953 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
1954 	struct thread_trace *ttrace;
1955 
1956 	if (sc == NULL)
1957 		return -1;
1958 
1959 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1960 	ttrace = thread__trace(thread, trace->output);
1961 	if (ttrace == NULL)
1962 		goto out_put;
1963 
1964 	trace__fprintf_sample(trace, evsel, sample, thread);
1965 
1966 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
1967 
1968 	if (ttrace->entry_str == NULL) {
1969 		ttrace->entry_str = malloc(trace__entry_str_size);
1970 		if (!ttrace->entry_str)
1971 			goto out_put;
1972 	}
1973 
1974 	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
1975 		trace__printf_interrupted_entry(trace);
1976 	/*
1977 	 * If this is raw_syscalls:sys_enter, then it always comes with the 6 possible
1978 	 * arguments, even if the syscall being handled, say "openat", uses only 4.
1979 	 * That breaks the syscall__augmented_args() check for augmented args, as we
1980 	 * calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs format
1981 	 * file, so when handling, say, the openat syscall we end up getting 6 args for
1982 	 * the raw_syscalls:sys_enter event when we expected just 4, and we mistakenly
1983 	 * think that the extra 2 u64 args are the augmented filename. So just check here
1984 	 * and avoid using augmented syscalls when the evsel is the raw_syscalls one.
1985 	 */
1986 	if (evsel != trace->syscalls.events.sys_enter)
1987 		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
1988 	ttrace->entry_time = sample->time;
1989 	msg = ttrace->entry_str;
1990 	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
1991 
1992 	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
1993 					   args, augmented_args, augmented_args_size, trace, thread);
1994 
1995 	if (sc->is_exit) {
1996 		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
1997 			int alignment = 0;
1998 
1999 			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2000 			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2001 			if (trace->args_alignment > printed)
2002 				alignment = trace->args_alignment - printed;
2003 			fprintf(trace->output, "%*s= ?\n", alignment, " ");
2004 		}
2005 	} else {
2006 		ttrace->entry_pending = true;
2007 		/* See trace__vfs_getname & trace__sys_exit */
2008 		ttrace->filename.pending_open = false;
2009 	}
2010 
2011 	if (trace->current != thread) {
2012 		thread__put(trace->current);
2013 		trace->current = thread__get(thread);
2014 	}
2015 	err = 0;
2016 out_put:
2017 	thread__put(thread);
2018 	return err;
2019 }
2020 
2021 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2022 				    struct perf_sample *sample)
2023 {
2024 	struct thread_trace *ttrace;
2025 	struct thread *thread;
2026 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2027 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2028 	char msg[1024];
2029 	void *args, *augmented_args = NULL;
2030 	int augmented_args_size;
2031 
2032 	if (sc == NULL)
2033 		return -1;
2034 
2035 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2036 	ttrace = thread__trace(thread, trace->output);
2037 	/*
2038 	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2039 	 * and the rest of the beautifiers access it via struct syscall_arg.
2040 	 */
2041 	if (ttrace == NULL)
2042 		goto out_put;
2043 
2044 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2045 	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2046 	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2047 	fprintf(trace->output, "%s", msg);
2048 	err = 0;
2049 out_put:
2050 	thread__put(thread);
2051 	return err;
2052 }
2053 
2054 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2055 				    struct perf_sample *sample,
2056 				    struct callchain_cursor *cursor)
2057 {
2058 	struct addr_location al;
2059 	int max_stack = evsel->core.attr.sample_max_stack ?
2060 			evsel->core.attr.sample_max_stack :
2061 			trace->max_stack;
2062 	int err;
2063 
2064 	if (machine__resolve(trace->host, &al, sample) < 0)
2065 		return -1;
2066 
2067 	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2068 	addr_location__put(&al);
2069 	return err;
2070 }
2071 
2072 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2073 {
2074 	/* TODO: user-configurable print_opts */
2075 	const unsigned int print_opts = EVSEL__PRINT_SYM |
2076 				        EVSEL__PRINT_DSO |
2077 				        EVSEL__PRINT_UNKNOWN_AS_ADDR;
2078 
2079 	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
2080 }
2081 
2082 static const char *errno_to_name(struct evsel *evsel, int err)
2083 {
2084 	struct perf_env *env = perf_evsel__env(evsel);
2085 	const char *arch_name = perf_env__arch(env);
2086 
2087 	return arch_syscalls__strerrno(arch_name, err);
2088 }
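
/*
 * With this, a failing syscall gets its return printed further below as,
 * for instance, "-1 ENOENT (No such file or directory)" instead of just
 * the raw negative number.
 */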
2089 
2090 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2091 			   union perf_event *event __maybe_unused,
2092 			   struct perf_sample *sample)
2093 {
2094 	long ret;
2095 	u64 duration = 0;
2096 	bool duration_calculated = false;
2097 	struct thread *thread;
2098 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2099 	int alignment = trace->args_alignment;
2100 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2101 	struct thread_trace *ttrace;
2102 
2103 	if (sc == NULL)
2104 		return -1;
2105 
2106 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2107 	ttrace = thread__trace(thread, trace->output);
2108 	if (ttrace == NULL)
2109 		goto out_put;
2110 
2111 	trace__fprintf_sample(trace, evsel, sample, thread);
2112 
2113 	if (trace->summary)
2114 		thread__update_stats(ttrace, id, sample);
2115 
2116 	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2117 
2118 	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2119 		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2120 		ttrace->filename.pending_open = false;
2121 		++trace->stats.vfs_getname;
2122 	}
2123 
2124 	if (ttrace->entry_time) {
2125 		duration = sample->time - ttrace->entry_time;
2126 		if (trace__filter_duration(trace, duration))
2127 			goto out;
2128 		duration_calculated = true;
2129 	} else if (trace->duration_filter)
2130 		goto out;
2131 
2132 	if (sample->callchain) {
2133 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2134 		if (callchain_ret == 0) {
2135 			if (callchain_cursor.nr < trace->min_stack)
2136 				goto out;
2137 			callchain_ret = 1;
2138 		}
2139 	}
2140 
2141 	if (trace->summary_only || (ret >= 0 && trace->failure_only))
2142 		goto out;
2143 
2144 	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2145 
2146 	if (ttrace->entry_pending) {
2147 		printed = fprintf(trace->output, "%s", ttrace->entry_str);
2148 	} else {
2149 		printed += fprintf(trace->output, " ... [");
2150 		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2151 		printed += 9;
2152 		printed += fprintf(trace->output, "]: %s()", sc->name);
2153 	}
2154 
2155 	printed++; /* the closing ')' */
2156 
2157 	if (alignment > printed)
2158 		alignment -= printed;
2159 	else
2160 		alignment = 0;
2161 
2162 	fprintf(trace->output, ")%*s= ", alignment, " ");
2163 
2164 	if (sc->fmt == NULL) {
2165 		if (ret < 0)
2166 			goto errno_print;
2167 signed_print:
2168 		fprintf(trace->output, "%ld", ret);
2169 	} else if (ret < 0) {
2170 errno_print: {
2171 		char bf[STRERR_BUFSIZE];
2172 		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2173 			   *e = errno_to_name(evsel, -ret);
2174 
2175 		fprintf(trace->output, "-1 %s (%s)", e, emsg);
2176 	}
2177 	} else if (ret == 0 && sc->fmt->timeout)
2178 		fprintf(trace->output, "0 (Timeout)");
2179 	else if (ttrace->ret_scnprintf) {
2180 		char bf[1024];
2181 		struct syscall_arg arg = {
2182 			.val	= ret,
2183 			.thread	= thread,
2184 			.trace	= trace,
2185 		};
2186 		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2187 		ttrace->ret_scnprintf = NULL;
2188 		fprintf(trace->output, "%s", bf);
2189 	} else if (sc->fmt->hexret)
2190 		fprintf(trace->output, "%#lx", ret);
2191 	else if (sc->fmt->errpid) {
2192 		struct thread *child = machine__find_thread(trace->host, ret, ret);
2193 
2194 		if (child != NULL) {
2195 			fprintf(trace->output, "%ld", ret);
2196 			if (child->comm_set)
2197 				fprintf(trace->output, " (%s)", thread__comm_str(child));
2198 			thread__put(child);
2199 		}
2200 	} else
2201 		goto signed_print;
2202 
2203 	fputc('\n', trace->output);
2204 
2205 	/*
2206 	 * For the sake of --max-events, we only count as an 'event' a non-filtered
2207 	 * sys_enter + sys_exit pair and other tracepoint events.
2208 	 */
2209 	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2210 		interrupted = true;
2211 
2212 	if (callchain_ret > 0)
2213 		trace__fprintf_callchain(trace, sample);
2214 	else if (callchain_ret < 0)
2215 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2216 out:
2217 	ttrace->entry_pending = false;
2218 	err = 0;
2219 out_put:
2220 	thread__put(thread);
2221 	return err;
2222 }
2223 
2224 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2225 			      union perf_event *event __maybe_unused,
2226 			      struct perf_sample *sample)
2227 {
2228 	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2229 	struct thread_trace *ttrace;
2230 	size_t filename_len, entry_str_len, to_move;
2231 	ssize_t remaining_space;
2232 	char *pos;
2233 	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
2234 
2235 	if (!thread)
2236 		goto out;
2237 
2238 	ttrace = thread__priv(thread);
2239 	if (!ttrace)
2240 		goto out_put;
2241 
2242 	filename_len = strlen(filename);
2243 	if (filename_len == 0)
2244 		goto out_put;
2245 
2246 	if (ttrace->filename.namelen < filename_len) {
2247 		char *f = realloc(ttrace->filename.name, filename_len + 1);
2248 
2249 		if (f == NULL)
2250 			goto out_put;
2251 
2252 		ttrace->filename.namelen = filename_len;
2253 		ttrace->filename.name = f;
2254 	}
2255 
2256 	strcpy(ttrace->filename.name, filename);
2257 	ttrace->filename.pending_open = true;
2258 
2259 	if (!ttrace->filename.ptr)
2260 		goto out_put;
2261 
2262 	entry_str_len = strlen(ttrace->entry_str);
2263 	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2264 	if (remaining_space <= 0)
2265 		goto out_put;
2266 
2267 	if (filename_len > (size_t)remaining_space) {
2268 		filename += filename_len - remaining_space;
2269 		filename_len = remaining_space;
2270 	}
2271 
2272 	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2273 	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2274 	memmove(pos + filename_len, pos, to_move);
2275 	memcpy(pos, filename, filename_len);
2276 
2277 	ttrace->filename.ptr = 0;
2278 	ttrace->filename.entry_str_pos = 0;
2279 out_put:
2280 	thread__put(thread);
2281 out:
2282 	return 0;
2283 }
2284 
2285 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2286 				     union perf_event *event __maybe_unused,
2287 				     struct perf_sample *sample)
2288 {
2289 	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
2290 	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2291 	struct thread *thread = machine__findnew_thread(trace->host,
2292 							sample->pid,
2293 							sample->tid);
2294 	struct thread_trace *ttrace = thread__trace(thread, trace->output);
2295 
2296 	if (ttrace == NULL)
2297 		goto out_dump;
2298 
2299 	ttrace->runtime_ms += runtime_ms;
2300 	trace->runtime_ms += runtime_ms;
2301 out_put:
2302 	thread__put(thread);
2303 	return 0;
2304 
2305 out_dump:
2306 	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2307 	       evsel->name,
2308 	       perf_evsel__strval(evsel, sample, "comm"),
2309 	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
2310 	       runtime,
2311 	       perf_evsel__intval(evsel, sample, "vruntime"));
2312 	goto out_put;
2313 }
2314 
2315 static int bpf_output__printer(enum binary_printer_ops op,
2316 			       unsigned int val, void *extra __maybe_unused, FILE *fp)
2317 {
2318 	unsigned char ch = (unsigned char)val;
2319 
2320 	switch (op) {
2321 	case BINARY_PRINT_CHAR_DATA:
2322 		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2323 	case BINARY_PRINT_DATA_BEGIN:
2324 	case BINARY_PRINT_LINE_BEGIN:
2325 	case BINARY_PRINT_ADDR:
2326 	case BINARY_PRINT_NUM_DATA:
2327 	case BINARY_PRINT_NUM_PAD:
2328 	case BINARY_PRINT_SEP:
2329 	case BINARY_PRINT_CHAR_PAD:
2330 	case BINARY_PRINT_LINE_END:
2331 	case BINARY_PRINT_DATA_END:
2332 	default:
2333 		break;
2334 	}
2335 
2336 	return 0;
2337 }
2338 
2339 static void bpf_output__fprintf(struct trace *trace,
2340 				struct perf_sample *sample)
2341 {
2342 	binary__fprintf(sample->raw_data, sample->raw_size, 8,
2343 			bpf_output__printer, NULL, trace->output);
2344 	++trace->nr_events_printed;
2345 }
2346 
2347 static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2348 				union perf_event *event __maybe_unused,
2349 				struct perf_sample *sample)
2350 {
2351 	struct thread *thread;
2352 	int callchain_ret = 0;
2353 	/*
2354 	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
2355 	 * this event's max_events having been hit and this is an entry coming
2356 	 * from the ring buffer that we should discard, since the max events
2357 	 * have already been considered/printed.
2358 	 */
2359 	if (evsel->disabled)
2360 		return 0;
2361 
2362 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2363 
2364 	if (sample->callchain) {
2365 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2366 		if (callchain_ret == 0) {
2367 			if (callchain_cursor.nr < trace->min_stack)
2368 				goto out;
2369 			callchain_ret = 1;
2370 		}
2371 	}
2372 
2373 	trace__printf_interrupted_entry(trace);
2374 	trace__fprintf_tstamp(trace, sample->time, trace->output);
2375 
2376 	if (trace->trace_syscalls && trace->show_duration)
2377 		fprintf(trace->output, "(         ): ");
2378 
2379 	if (thread)
2380 		trace__fprintf_comm_tid(trace, thread, trace->output);
2381 
2382 	if (evsel == trace->syscalls.events.augmented) {
2383 		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
2384 		struct syscall *sc = trace__syscall_info(trace, evsel, id);
2385 
2386 		if (sc) {
2387 			fprintf(trace->output, "%s(", sc->name);
2388 			trace__fprintf_sys_enter(trace, evsel, sample);
2389 			fputc(')', trace->output);
2390 			goto newline;
2391 		}
2392 
2393 		/*
2394 		 * XXX: Not having the associated syscall info or not finding/adding
2395 		 * 	the thread should never happen, but if it does...
2396 		 * 	fall thru and print it as a bpf_output event.
2397 		 */
2398 	}
2399 
2400 	fprintf(trace->output, "%s:", evsel->name);
2401 
2402 	if (perf_evsel__is_bpf_output(evsel)) {
2403 		bpf_output__fprintf(trace, sample);
2404 	} else if (evsel->tp_format) {
2405 		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2406 		    trace__fprintf_sys_enter(trace, evsel, sample)) {
2407 			event_format__fprintf(evsel->tp_format, sample->cpu,
2408 					      sample->raw_data, sample->raw_size,
2409 					      trace->output);
2410 			++trace->nr_events_printed;
2411 
2412 			if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2413 				evsel__disable(evsel);
2414 				evsel__close(evsel);
2415 			}
2416 		}
2417 	}
2418 
2419 newline:
2420 	fprintf(trace->output, "\n");
2421 
2422 	if (callchain_ret > 0)
2423 		trace__fprintf_callchain(trace, sample);
2424 	else if (callchain_ret < 0)
2425 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2426 out:
2427 	thread__put(thread);
2428 	return 0;
2429 }
2430 
2431 static void print_location(FILE *f, struct perf_sample *sample,
2432 			   struct addr_location *al,
2433 			   bool print_dso, bool print_sym)
2434 {
2435 
2436 	if ((verbose > 0 || print_dso) && al->map)
2437 		fprintf(f, "%s@", al->map->dso->long_name);
2438 
2439 	if ((verbose > 0 || print_sym) && al->sym)
2440 		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2441 			al->addr - al->sym->start);
2442 	else if (al->map)
2443 		fprintf(f, "0x%" PRIx64, al->addr);
2444 	else
2445 		fprintf(f, "0x%" PRIx64, sample->addr);
2446 }
2447 
2448 static int trace__pgfault(struct trace *trace,
2449 			  struct evsel *evsel,
2450 			  union perf_event *event __maybe_unused,
2451 			  struct perf_sample *sample)
2452 {
2453 	struct thread *thread;
2454 	struct addr_location al;
2455 	char map_type = 'd';
2456 	struct thread_trace *ttrace;
2457 	int err = -1;
2458 	int callchain_ret = 0;
2459 
2460 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2461 
2462 	if (sample->callchain) {
2463 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2464 		if (callchain_ret == 0) {
2465 			if (callchain_cursor.nr < trace->min_stack)
2466 				goto out_put;
2467 			callchain_ret = 1;
2468 		}
2469 	}
2470 
2471 	ttrace = thread__trace(thread, trace->output);
2472 	if (ttrace == NULL)
2473 		goto out_put;
2474 
2475 	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2476 		ttrace->pfmaj++;
2477 	else
2478 		ttrace->pfmin++;
2479 
2480 	if (trace->summary_only)
2481 		goto out;
2482 
2483 	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2484 
2485 	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2486 
2487 	fprintf(trace->output, "%sfault [",
2488 		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2489 		"maj" : "min");
2490 
2491 	print_location(trace->output, sample, &al, false, true);
2492 
2493 	fprintf(trace->output, "] => ");
2494 
2495 	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2496 
2497 	if (!al.map) {
2498 		thread__find_symbol_fb(thread, sample->cpumode, sample->addr, &al);
2499 
2500 		if (al.map)
2501 			map_type = 'x';
2502 		else
2503 			map_type = '?';
2504 	}
2505 
2506 	print_location(trace->output, sample, &al, true, false);
2507 
2508 	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2509 
2510 	if (callchain_ret > 0)
2511 		trace__fprintf_callchain(trace, sample);
2512 	else if (callchain_ret < 0)
2513 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2514 
2515 	++trace->nr_events_printed;
2516 out:
2517 	err = 0;
2518 out_put:
2519 	thread__put(thread);
2520 	return err;
2521 }
2522 
2523 static void trace__set_base_time(struct trace *trace,
2524 				 struct evsel *evsel,
2525 				 struct perf_sample *sample)
2526 {
2527 	/*
2528 	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2529 	 * and don't use sample->time unconditionally; we may end up having
2530 	 * some other event in the future without PERF_SAMPLE_TIME for good
2531 	 * reason, i.e. we may not be interested in its timestamps, just in
2532 	 * it taking place, picking some piece of information when it
2533 	 * appears in our event stream (vfs_getname comes to mind).
2534 	 */
2535 	if (trace->base_time == 0 && !trace->full_time &&
2536 	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2537 		trace->base_time = sample->time;
2538 }
2539 
2540 static int trace__process_sample(struct perf_tool *tool,
2541 				 union perf_event *event,
2542 				 struct perf_sample *sample,
2543 				 struct evsel *evsel,
2544 				 struct machine *machine __maybe_unused)
2545 {
2546 	struct trace *trace = container_of(tool, struct trace, tool);
2547 	struct thread *thread;
2548 	int err = 0;
2549 
2550 	tracepoint_handler handler = evsel->handler;
2551 
2552 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2553 	if (thread && thread__is_filtered(thread))
2554 		goto out;
2555 
2556 	trace__set_base_time(trace, evsel, sample);
2557 
2558 	if (handler) {
2559 		++trace->nr_events;
2560 		handler(trace, evsel, event, sample);
2561 	}
2562 out:
2563 	thread__put(thread);
2564 	return err;
2565 }
2566 
2567 static int trace__record(struct trace *trace, int argc, const char **argv)
2568 {
2569 	unsigned int rec_argc, i, j;
2570 	const char **rec_argv;
2571 	const char * const record_args[] = {
2572 		"record",
2573 		"-R",
2574 		"-m", "1024",
2575 		"-c", "1",
2576 	};
2577 
2578 	const char * const sc_args[] = { "-e", };
2579 	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2580 	const char * const majpf_args[] = { "-e", "major-faults" };
2581 	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2582 	const char * const minpf_args[] = { "-e", "minor-faults" };
2583 	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2584 
2585 	/* +1 is for the event string below */
2586 	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2587 		majpf_args_nr + minpf_args_nr + argc;
2588 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2589 
2590 	if (rec_argv == NULL)
2591 		return -ENOMEM;
2592 
2593 	j = 0;
2594 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2595 		rec_argv[j++] = record_args[i];
2596 
2597 	if (trace->trace_syscalls) {
2598 		for (i = 0; i < sc_args_nr; i++)
2599 			rec_argv[j++] = sc_args[i];
2600 
2601 		/* event string may be different for older kernels - e.g., RHEL6 */
2602 		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2603 			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2604 		else if (is_valid_tracepoint("syscalls:sys_enter"))
2605 			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2606 		else {
2607 			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2608 			free(rec_argv);
2609 			return -1;
2610 		}
2611 	}
2612 
2613 	if (trace->trace_pgfaults & TRACE_PFMAJ)
2614 		for (i = 0; i < majpf_args_nr; i++)
2615 			rec_argv[j++] = majpf_args[i];
2616 
2617 	if (trace->trace_pgfaults & TRACE_PFMIN)
2618 		for (i = 0; i < minpf_args_nr; i++)
2619 			rec_argv[j++] = minpf_args[i];
2620 
2621 	for (i = 0; i < (unsigned int)argc; i++)
2622 		rec_argv[j++] = argv[i];
2623 
2624 	return cmd_record(j, rec_argv);
2625 }
2626 
2627 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2628 
2629 static bool evlist__add_vfs_getname(struct evlist *evlist)
2630 {
2631 	bool found = false;
2632 	struct evsel *evsel, *tmp;
2633 	struct parse_events_error err = { .idx = 0, };
2634 	int ret = parse_events(evlist, "probe:vfs_getname*", &err);
2635 
2636 	if (ret)
2637 		return false;
2638 
2639 	evlist__for_each_entry_safe(evlist, evsel, tmp) {
2640 		if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
2641 			continue;
2642 
2643 		if (perf_evsel__field(evsel, "pathname")) {
2644 			evsel->handler = trace__vfs_getname;
2645 			found = true;
2646 			continue;
2647 		}
2648 
2649 		list_del_init(&evsel->core.node);
2650 		evsel->evlist = NULL;
2651 		evsel__delete(evsel);
2652 	}
2653 
2654 	return found;
2655 }
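
/*
 * This only finds something when a vfs_getname probe was put in place
 * beforehand, along the lines of (the line number varies per kernel):
 *
 *	perf probe 'vfs_getname=getname_flags:72 pathname=result->name:string'
 */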
2656 
2657 static struct evsel *perf_evsel__new_pgfault(u64 config)
2658 {
2659 	struct evsel *evsel;
2660 	struct perf_event_attr attr = {
2661 		.type = PERF_TYPE_SOFTWARE,
2662 		.mmap_data = 1,
2663 	};
2664 
2665 	attr.config = config;
2666 	attr.sample_period = 1;
2667 
2668 	event_attr_init(&attr);
2669 
2670 	evsel = evsel__new(&attr);
2671 	if (evsel)
2672 		evsel->handler = trace__pgfault;
2673 
2674 	return evsel;
2675 }
2676 
2677 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2678 {
2679 	const u32 type = event->header.type;
2680 	struct evsel *evsel;
2681 
2682 	if (type != PERF_RECORD_SAMPLE) {
2683 		trace__process_event(trace, trace->host, event, sample);
2684 		return;
2685 	}
2686 
2687 	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2688 	if (evsel == NULL) {
2689 		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2690 		return;
2691 	}
2692 
2693 	if (evswitch__discard(&trace->evswitch, evsel))
2694 		return;
2695 
2696 	trace__set_base_time(trace, evsel, sample);
2697 
2698 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
2699 	    sample->raw_data == NULL) {
2700 		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2701 		       perf_evsel__name(evsel), sample->tid,
2702 		       sample->cpu, sample->raw_size);
2703 	} else {
2704 		tracepoint_handler handler = evsel->handler;
2705 		handler(trace, evsel, event, sample);
2706 	}
2707 
2708 	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
2709 		interrupted = true;
2710 }
2711 
2712 static int trace__add_syscall_newtp(struct trace *trace)
2713 {
2714 	int ret = -1;
2715 	struct evlist *evlist = trace->evlist;
2716 	struct evsel *sys_enter, *sys_exit;
2717 
2718 	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
2719 	if (sys_enter == NULL)
2720 		goto out;
2721 
2722 	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2723 		goto out_delete_sys_enter;
2724 
2725 	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
2726 	if (sys_exit == NULL)
2727 		goto out_delete_sys_enter;
2728 
2729 	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2730 		goto out_delete_sys_exit;
2731 
2732 	perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
2733 	perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
2734 
2735 	evlist__add(evlist, sys_enter);
2736 	evlist__add(evlist, sys_exit);
2737 
2738 	if (callchain_param.enabled && !trace->kernel_syscallchains) {
2739 		/*
2740 		 * We're interested only in the user space callchain
2741 		 * leading to the syscall, allow overriding that for
2742 		 * debugging reasons using --kernel_syscall_callchains
2743 		 */
2744 		sys_exit->core.attr.exclude_callchain_kernel = 1;
2745 	}
2746 
2747 	trace->syscalls.events.sys_enter = sys_enter;
2748 	trace->syscalls.events.sys_exit  = sys_exit;
2749 
2750 	ret = 0;
2751 out:
2752 	return ret;
2753 
2754 out_delete_sys_exit:
2755 	evsel__delete_priv(sys_exit);
2756 out_delete_sys_enter:
2757 	evsel__delete_priv(sys_enter);
2758 	goto out;
2759 }
2760 
2761 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
2762 {
2763 	int err = -1;
2764 	struct evsel *sys_exit;
2765 	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2766 						trace->ev_qualifier_ids.nr,
2767 						trace->ev_qualifier_ids.entries);
2768 
2769 	if (filter == NULL)
2770 		goto out_enomem;
2771 
2772 	if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2773 					  filter)) {
2774 		sys_exit = trace->syscalls.events.sys_exit;
2775 		err = perf_evsel__append_tp_filter(sys_exit, filter);
2776 	}
2777 
2778 	free(filter);
2779 out:
2780 	return err;
2781 out_enomem:
2782 	errno = ENOMEM;
2783 	goto out;
2784 }
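
/*
 * The filter built above by asprintf_expr_inout_ints() is a plain
 * tracepoint filter on the syscall id, e.g., with purely illustrative ids:
 *
 *	"id == 0 || id == 3"	when selecting syscalls, or
 *	"id != 0 && id != 3"	when the qualifier is negated.
 */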
2785 
2786 #ifdef HAVE_LIBBPF_SUPPORT
2787 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
2788 {
2789 	if (trace->bpf_obj == NULL)
2790 		return NULL;
2791 
2792 	return bpf_object__find_program_by_title(trace->bpf_obj, name);
2793 }
2794 
2795 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
2796 							const char *prog_name, const char *type)
2797 {
2798 	struct bpf_program *prog;
2799 
2800 	if (prog_name == NULL) {
2801 		char default_prog_name[256];
2802 		scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
2803 		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
2804 		if (prog != NULL)
2805 			goto out_found;
2806 		if (sc->fmt && sc->fmt->alias) {
2807 			scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
2808 			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
2809 			if (prog != NULL)
2810 				goto out_found;
2811 		}
2812 		goto out_unaugmented;
2813 	}
2814 
2815 	prog = trace__find_bpf_program_by_title(trace, prog_name);
2816 
2817 	if (prog != NULL) {
2818 out_found:
2819 		return prog;
2820 	}
2821 
2822 	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
2823 		 prog_name, type, sc->name);
2824 out_unaugmented:
2825 	return trace->syscalls.unaugmented_prog;
2826 }
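
/*
 * E.g. for the "openat" syscall and type "enter", the lookup above tries a
 * program in a section named "!syscalls:sys_enter_openat", then the alias,
 * if any, and falls back to the unaugmented program when none is found.
 */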
2827 
2828 static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
2829 {
2830 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
2831 
2832 	if (sc == NULL)
2833 		return;
2834 
2835 	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
2836 	sc->bpf_prog.sys_exit  = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit  : NULL,  "exit");
2837 }
2838 
2839 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
2840 {
2841 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
2842 	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
2843 }
2844 
2845 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
2846 {
2847 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
2848 	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
2849 }
2850 
2851 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
2852 {
2853 	struct syscall *sc = trace__syscall_info(trace, NULL, id);
2854 	int arg = 0;
2855 
2856 	if (sc == NULL)
2857 		goto out;
2858 
2859 	for (; arg < sc->nr_args; ++arg) {
2860 		entry->string_args_len[arg] = 0;
2861 		if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
2862 			/* Should be set like strace -s strsize */
2863 			entry->string_args_len[arg] = PATH_MAX;
2864 		}
2865 	}
2866 out:
2867 	for (; arg < 6; ++arg)
2868 		entry->string_args_len[arg] = 0;
2869 }

2870 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
2871 {
2872 	int fd = bpf_map__fd(trace->syscalls.map);
2873 	struct bpf_map_syscall_entry value = {
2874 		.enabled = !trace->not_ev_qualifier,
2875 	};
2876 	int err = 0;
2877 	size_t i;
2878 
2879 	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
2880 		int key = trace->ev_qualifier_ids.entries[i];
2881 
2882 		if (value.enabled) {
2883 			trace__init_bpf_map_syscall_args(trace, key, &value);
2884 			trace__init_syscall_bpf_progs(trace, key);
2885 		}
2886 
2887 		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
2888 		if (err)
2889 			break;
2890 	}
2891 
2892 	return err;
2893 }
2894 
2895 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
2896 {
2897 	int fd = bpf_map__fd(trace->syscalls.map);
2898 	struct bpf_map_syscall_entry value = {
2899 		.enabled = enabled,
2900 	};
2901 	int err = 0, key;
2902 
2903 	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
2904 		if (enabled)
2905 			trace__init_bpf_map_syscall_args(trace, key, &value);
2906 
2907 		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
2908 		if (err)
2909 			break;
2910 	}
2911 
2912 	return err;
2913 }
2914 
2915 static int trace__init_syscalls_bpf_map(struct trace *trace)
2916 {
2917 	bool enabled = true;
2918 
2919 	if (trace->ev_qualifier_ids.nr)
2920 		enabled = trace->not_ev_qualifier;
2921 
2922 	return __trace__init_syscalls_bpf_map(trace, enabled);
2923 }
2924 
2925 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
2926 {
2927 	struct tep_format_field *field, *candidate_field;
2928 	int id;
2929 
2930 	/*
2931 	 * We're only interested in syscalls that have a pointer:
2932 	 */
2933 	for (field = sc->args; field; field = field->next) {
2934 		if (field->flags & TEP_FIELD_IS_POINTER)
2935 			goto try_to_find_pair;
2936 	}
2937 
2938 	return NULL;
2939 
2940 try_to_find_pair:
2941 	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
2942 		struct syscall *pair = trace__syscall_info(trace, NULL, id);
2943 		struct bpf_program *pair_prog;
2944 		bool is_candidate = false;
2945 
2946 		if (pair == NULL || pair == sc ||
2947 		    pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
2948 			continue;
2949 
2950 		for (field = sc->args, candidate_field = pair->args;
2951 		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
2952 			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
2953 			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
2954 
2955 			if (is_pointer) {
2956 			       if (!candidate_is_pointer) {
2957 					// The candidate just doesn't copy our pointer arg, it might copy other pointers we want.
2958 					continue;
2959 			       }
2960 			} else {
2961 				if (candidate_is_pointer) {
2962 					// The candidate might copy a pointer we don't have, skip it.
2963 					goto next_candidate;
2964 				}
2965 				continue;
2966 			}
2967 
2968 			if (strcmp(field->type, candidate_field->type))
2969 				goto next_candidate;
2970 
2971 			is_candidate = true;
2972 		}
2973 
2974 		if (!is_candidate)
2975 			goto next_candidate;
2976 
2977 		/*
2978 		 * Check if the tentative pair syscall augmenter has more pointers; if it has,
2979 		 * then it may be collecting those and we can't use it, as it would collect
2980 		 * more than what is common to the two syscalls.
2981 		 */
2982 		if (candidate_field) {
2983 			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
2984 				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
2985 					goto next_candidate;
2986 		}
2987 
2988 		pair_prog = pair->bpf_prog.sys_enter;
2989 		/*
2990 		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
2991 		 * have been searched for, so search it here and if it returns the
2992 		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
2993 		 * program for a filtered syscall on a non-filtered one.
2994 		 *
2995 		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
2996 		 * useful for "renameat2".
2997 		 */
2998 		if (pair_prog == NULL) {
2999 			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3000 			if (pair_prog == trace->syscalls.unaugmented_prog)
3001 				goto next_candidate;
3002 		}
3003 
3004 		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3005 		return pair_prog;
3006 	next_candidate:
3007 		continue;
3008 	}
3009 
3010 	return NULL;
3011 }
3012 
3013 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3014 {
3015 	int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3016 	    map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3017 	int err = 0, key;
3018 
3019 	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3020 		int prog_fd;
3021 
3022 		if (!trace__syscall_enabled(trace, key))
3023 			continue;
3024 
3025 		trace__init_syscall_bpf_progs(trace, key);
3026 
3027 		// It'll get at least the "!raw_syscalls:unaugmented" prog
3028 		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3029 		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3030 		if (err)
3031 			break;
3032 		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3033 		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
3034 		if (err)
3035 			break;
3036 	}
3037 
3038 	/*
3039 	 * Now let's do a second pass looking for enabled syscalls without
3040 	 * an augmenter that have a signature that is a superset of another
3041 	 * syscall with an augmenter so that we can auto-reuse it.
3042 	 *
3043 	 * I.e. if we have an augmenter for the "open" syscall that has
3044 	 * this signature:
3045 	 *
3046 	 *   int open(const char *pathname, int flags, mode_t mode);
3047 	 *
3048 	 * I.e. that will collect just the first string argument, then we
3049 	 * can reuse it for the 'creat' syscall, that has this signature:
3050 	 *
3051 	 *   int creat(const char *pathname, mode_t mode);
3052 	 *
3053 	 * and for:
3054 	 *
3055 	 *   int stat(const char *pathname, struct stat *statbuf);
3056 	 *   int lstat(const char *pathname, struct stat *statbuf);
3057 	 *
3058 	 * Because the 'open' augmenter will collect the first arg as a string,
3059 	 * and leave alone all the other args, which already helps with
3060 	 * beautifying 'stat' and 'lstat''s pathname arg.
3061 	 *
3062 	 * Then, in time, when 'stat' gets an augmenter that collects both
3063 	 * first and second arg (this one on the raw_syscalls:sys_exit prog
3064 	 * array tail call), then that one will be used.
3065 	 */
3066 	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3067 		struct syscall *sc = trace__syscall_info(trace, NULL, key);
3068 		struct bpf_program *pair_prog;
3069 		int prog_fd;
3070 
3071 		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3072 			continue;
3073 
3074 		/*
3075 		 * For now we're just reusing the sys_enter prog, and if it
3076 		 * already has an augmenter, we don't need to find one.
3077 		 */
3078 		if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3079 			continue;
3080 
3081 		/*
3082 		 * Look at all the other syscalls for one that has a signature
3083 		 * that is close enough that we can share:
3084 		 */
3085 		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3086 		if (pair_prog == NULL)
3087 			continue;
3088 
3089 		sc->bpf_prog.sys_enter = pair_prog;
3090 
3091 		/*
3092 		 * Update the BPF_MAP_TYPE_PROG_ARRAY for raw_syscalls:sys_enter
3093 		 * with the fd for the program we're reusing:
3094 		 */
3095 		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3096 		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3097 		if (err)
3098 			break;
3099 	}
3100 
3101 
3102 	return err;
3103 }
3104 #else
3105 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3106 {
3107 	return 0;
3108 }
3109 
3110 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3111 {
3112 	return 0;
3113 }
3114 
3115 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3116 							    const char *name __maybe_unused)
3117 {
3118 	return NULL;
3119 }
3120 
3121 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3122 {
3123 	return 0;
3124 }
3125 #endif // HAVE_LIBBPF_SUPPORT
3126 
3127 static int trace__set_ev_qualifier_filter(struct trace *trace)
3128 {
3129 	if (trace->syscalls.map)
3130 		return trace__set_ev_qualifier_bpf_filter(trace);
3131 	if (trace->syscalls.events.sys_enter)
3132 		return trace__set_ev_qualifier_tp_filter(trace);
3133 	return 0;
3134 }
3135 
3136 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3137 				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3138 {
3139 	int err = 0;
3140 #ifdef HAVE_LIBBPF_SUPPORT
3141 	bool value = true;
3142 	int map_fd = bpf_map__fd(map);
3143 	size_t i;
3144 
3145 	for (i = 0; i < npids; ++i) {
3146 		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3147 		if (err)
3148 			break;
3149 	}
3150 #endif
3151 	return err;
3152 }
3153 
3154 static int trace__set_filter_loop_pids(struct trace *trace)
3155 {
3156 	unsigned int nr = 1, err;
3157 	pid_t pids[32] = {
3158 		getpid(),
3159 	};
3160 	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3161 
3162 	while (thread && nr < ARRAY_SIZE(pids)) {
3163 		struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3164 
3165 		if (parent == NULL)
3166 			break;
3167 
3168 		if (!strcmp(thread__comm_str(parent), "sshd") ||
3169 		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
3170 			pids[nr++] = parent->tid;
3171 			break;
3172 		}
3173 		thread = parent;
3174 	}
3175 
3176 	err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
3177 	if (!err && trace->filter_pids.map)
3178 		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3179 
3180 	return err;
3181 }
3182 
3183 static int trace__set_filter_pids(struct trace *trace)
3184 {
3185 	int err = 0;
3186 	/*
3187 	 * Better not use !target__has_task() here because we need to cover the
3188 	 * case where no threads were specified in the command line, but a
3189 	 * workload was, and in that case we will fill in the thread_map when
3190 	 * we fork the workload in perf_evlist__prepare_workload.
3191 	 */
3192 	if (trace->filter_pids.nr > 0) {
3193 		err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3194 						      trace->filter_pids.entries);
3195 		if (!err && trace->filter_pids.map) {
3196 			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3197 						       trace->filter_pids.entries);
3198 		}
3199 	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3200 		err = trace__set_filter_loop_pids(trace);
3201 	}
3202 
3203 	return err;
3204 }
3205 
3206 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3207 {
3208 	struct evlist *evlist = trace->evlist;
3209 	struct perf_sample sample;
3210 	int err;
3211 
3212 	err = perf_evlist__parse_sample(evlist, event, &sample);
3213 	if (err)
3214 		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3215 	else
3216 		trace__handle_event(trace, event, &sample);
3217 
3218 	return 0;
3219 }
3220 
3221 static int __trace__flush_events(struct trace *trace)
3222 {
3223 	u64 first = ordered_events__first_time(&trace->oe.data);
3224 	u64 flush = trace->oe.last - NSEC_PER_SEC;
3225 
3226 	/* Is there something to flush? */
3227 	if (first && first < flush)
3228 		return ordered_events__flush_time(&trace->oe.data, flush);
3229 
3230 	return 0;
3231 }
3232 
3233 static int trace__flush_events(struct trace *trace)
3234 {
3235 	return !trace->sort_events ? 0 : __trace__flush_events(trace);
3236 }
3237 
3238 static int trace__deliver_event(struct trace *trace, union perf_event *event)
3239 {
3240 	int err;
3241 
3242 	if (!trace->sort_events)
3243 		return __trace__deliver_event(trace, event);
3244 
3245 	err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3246 	if (err && err != -1)
3247 		return err;
3248 
3249 	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3250 	if (err)
3251 		return err;
3252 
3253 	return trace__flush_events(trace);
3254 }
3255 
3256 static int ordered_events__deliver_event(struct ordered_events *oe,
3257 					 struct ordered_event *event)
3258 {
3259 	struct trace *trace = container_of(oe, struct trace, oe.data);
3260 
3261 	return __trace__deliver_event(trace, event->event);
3262 }
3263 
3264 static int trace__run(struct trace *trace, int argc, const char **argv)
3265 {
3266 	struct evlist *evlist = trace->evlist;
3267 	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
3268 	int err = -1, i;
3269 	unsigned long before;
3270 	const bool forks = argc > 0;
3271 	bool draining = false;
3272 
3273 	trace->live = true;
3274 
3275 	if (!trace->raw_augmented_syscalls) {
3276 		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3277 			goto out_error_raw_syscalls;
3278 
3279 		if (trace->trace_syscalls)
3280 			trace->vfs_getname = evlist__add_vfs_getname(evlist);
3281 	}
3282 
3283 	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3284 		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
3285 		if (pgfault_maj == NULL)
3286 			goto out_error_mem;
3287 		perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3288 		evlist__add(evlist, pgfault_maj);
3289 	}
3290 
3291 	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3292 		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
3293 		if (pgfault_min == NULL)
3294 			goto out_error_mem;
3295 		perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3296 		evlist__add(evlist, pgfault_min);
3297 	}
3298 
3299 	if (trace->sched &&
3300 	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
3301 				   trace__sched_stat_runtime))
3302 		goto out_error_sched_stat_runtime;
3303 
3304 	/*
3305 	 * If a global cgroup was set, apply it to all the events without an
3306 	 * explicit cgroup. I.e.:
3307 	 *
3308 	 * 	trace -G A -e sched:*switch
3309 	 *
3310 	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3311 	 * _and_ sched:sched_switch to the 'A' cgroup, while:
3312 	 *
3313 	 * trace -e sched:*switch -G A
3314 	 *
3315 	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
3316 	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
3317 	 * a cgroup (on the root cgroup, sys wide, etc).
3318 	 *
3319 	 * Multiple cgroups:
3320 	 *
3321 	 * trace -G A -e sched:*switch -G B
3322 	 *
3323 	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3324 	 * to the 'B' cgroup.
3325 	 *
3326 	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3327 	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3328 	 */
3329 	if (trace->cgroup)
3330 		evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3331 
3332 	err = perf_evlist__create_maps(evlist, &trace->opts.target);
3333 	if (err < 0) {
3334 		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3335 		goto out_delete_evlist;
3336 	}
3337 
3338 	err = trace__symbols_init(trace, evlist);
3339 	if (err < 0) {
3340 		fprintf(trace->output, "Problems initializing symbol libraries!\n");
3341 		goto out_delete_evlist;
3342 	}
3343 
3344 	perf_evlist__config(evlist, &trace->opts, &callchain_param);
3345 
3346 	signal(SIGCHLD, sig_handler);
3347 	signal(SIGINT, sig_handler);
3348 
3349 	if (forks) {
3350 		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
3351 						    argv, false, NULL);
3352 		if (err < 0) {
3353 			fprintf(trace->output, "Couldn't run the workload!\n");
3354 			goto out_delete_evlist;
3355 		}
3356 	}
3357 
3358 	err = evlist__open(evlist);
3359 	if (err < 0)
3360 		goto out_error_open;
3361 
3362 	err = bpf__apply_obj_config();
3363 	if (err) {
3364 		char errbuf[BUFSIZ];
3365 
3366 		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
3367 		pr_err("ERROR: Apply config to BPF failed: %s\n",
3368 			 errbuf);
3369 		goto out_error_open;
3370 	}
3371 
3372 	err = trace__set_filter_pids(trace);
3373 	if (err < 0)
3374 		goto out_error_mem;
3375 
3376 	if (trace->syscalls.map)
3377 		trace__init_syscalls_bpf_map(trace);
3378 
3379 	if (trace->syscalls.prog_array.sys_enter)
3380 		trace__init_syscalls_bpf_prog_array_maps(trace);
3381 
3382 	if (trace->ev_qualifier_ids.nr > 0) {
3383 		err = trace__set_ev_qualifier_filter(trace);
3384 		if (err < 0)
3385 			goto out_errno;
3386 
3387 		if (trace->syscalls.events.sys_exit) {
3388 			pr_debug("event qualifier tracepoint filter: %s\n",
3389 				 trace->syscalls.events.sys_exit->filter);
3390 		}
3391 	}
3392 
3393 	/*
3394 	 * If the "close" syscall is not traced, then we will not have the
3395 	 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the
3396 	 * fd->pathname table, and we would end up showing the last value set by
3397 	 * syscalls opening a pathname and associating it with a descriptor, or
3398 	 * reading it from /proc/pid/fd/ in cases where that doesn't make
3399 	 * sense.
3400 	 *
3401 	 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
3402 	 * not in use.
3403 	 */
3404 	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
3405 
3406 	err = perf_evlist__apply_filters(evlist, &evsel);
3407 	if (err < 0)
3408 		goto out_error_apply_filters;
3409 
3410 	if (trace->dump.map)
3411 		bpf_map__fprintf(trace->dump.map, trace->output);
3412 
3413 	err = evlist__mmap(evlist, trace->opts.mmap_pages);
3414 	if (err < 0)
3415 		goto out_error_mmap;
3416 
3417 	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
3418 		evlist__enable(evlist);
3419 
3420 	if (forks)
3421 		perf_evlist__start_workload(evlist);
3422 
3423 	if (trace->opts.initial_delay) {
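		/* opts.initial_delay (-D/--delay) is in msecs, while usleep() takes usecs */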
3424 		usleep(trace->opts.initial_delay * 1000);
3425 		evlist__enable(evlist);
3426 	}
3427 
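	/*
	 * Show the tid in the output when events may come from more than one
	 * thread: system wide (pid -1 in the thread map), a multi-threaded
	 * target, or inherit set so that children are traced too.
	 */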
3428 	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
3429 				  evlist->core.threads->nr > 1 ||
3430 				  perf_evlist__first(evlist)->core.attr.inherit;
3431 
3432 	/*
3433 	 * Now that we have already used evsel->core.attr to ask the kernel to set up
3434 	 * the events, let's reuse evsel->core.attr.sample_max_stack as the limit in
3435 	 * trace__resolve_callchain(), allowing per-event max-stack settings
3436 	 * to override an explicitly set --max-stack global setting.
3437 	 */
3438 	evlist__for_each_entry(evlist, evsel) {
3439 		if (evsel__has_callchain(evsel) &&
3440 		    evsel->core.attr.sample_max_stack == 0)
3441 			evsel->core.attr.sample_max_stack = trace->max_stack;
3442 	}
3443 again:
3444 	before = trace->nr_events;
3445 
3446 	for (i = 0; i < evlist->nr_mmaps; i++) {
3447 		union perf_event *event;
3448 		struct mmap *md;
3449 
3450 		md = &evlist->mmap[i];
3451 		if (perf_mmap__read_init(md) < 0)
3452 			continue;
3453 
3454 		while ((event = perf_mmap__read_event(md)) != NULL) {
3455 			++trace->nr_events;
3456 
3457 			err = trace__deliver_event(trace, event);
3458 			if (err)
3459 				goto out_disable;
3460 
3461 			perf_mmap__consume(md);
3462 
3463 			if (interrupted)
3464 				goto out_disable;
3465 
3466 			if (done && !draining) {
3467 				evlist__disable(evlist);
3468 				draining = true;
3469 			}
3470 		}
3471 		perf_mmap__read_done(md);
3472 	}
3473 
3474 	if (trace->nr_events == before) {
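		/* once the workload is done, wait at most 100ms to drain remaining events; otherwise block in poll */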
3475 		int timeout = done ? 100 : -1;
3476 
3477 		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
3478 			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
3479 				draining = true;
3480 
3481 			goto again;
3482 		} else {
3483 			if (trace__flush_events(trace))
3484 				goto out_disable;
3485 		}
3486 	} else {
3487 		goto again;
3488 	}
3489 
3490 out_disable:
3491 	thread__zput(trace->current);
3492 
3493 	evlist__disable(evlist);
3494 
3495 	if (trace->sort_events)
3496 		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
3497 
3498 	if (!err) {
3499 		if (trace->summary)
3500 			trace__fprintf_thread_summary(trace, trace->output);
3501 
3502 		if (trace->show_tool_stats) {
3503 			fprintf(trace->output, "Stats:\n "
3504 					       " vfs_getname : %" PRIu64 "\n"
3505 					       " proc_getname: %" PRIu64 "\n",
3506 				trace->stats.vfs_getname,
3507 				trace->stats.proc_getname);
3508 		}
3509 	}
3510 
3511 out_delete_evlist:
3512 	trace__symbols__exit(trace);
3513 
3514 	evlist__delete(evlist);
3515 	cgroup__put(trace->cgroup);
3516 	trace->evlist = NULL;
3517 	trace->live = false;
3518 	return err;
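/*
 * The nested block below exists only to scope errbuf[] for the error labels
 * that need it; these labels are reached exclusively via the gotos above.
 */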
3519 {
3520 	char errbuf[BUFSIZ];
3521 
3522 out_error_sched_stat_runtime:
3523 	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
3524 	goto out_error;
3525 
3526 out_error_raw_syscalls:
3527 	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
3528 	goto out_error;
3529 
3530 out_error_mmap:
3531 	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
3532 	goto out_error;
3533 
3534 out_error_open:
3535 	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
3536 
3537 out_error:
3538 	fprintf(trace->output, "%s\n", errbuf);
3539 	goto out_delete_evlist;
3540 
3541 out_error_apply_filters:
3542 	fprintf(trace->output,
3543 		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
3544 		evsel->filter, perf_evsel__name(evsel), errno,
3545 		str_error_r(errno, errbuf, sizeof(errbuf)));
3546 	goto out_delete_evlist;
3547 }
3548 out_error_mem:
3549 	fprintf(trace->output, "Not enough memory to run!\n");
3550 	goto out_delete_evlist;
3551 
3552 out_errno:
3553 	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
3554 	goto out_delete_evlist;
3555 }
3556 
3557 static int trace__replay(struct trace *trace)
3558 {
3559 	const struct evsel_str_handler handlers[] = {
3560 		{ "probe:vfs_getname",	     trace__vfs_getname, },
3561 	};
3562 	struct perf_data data = {
3563 		.path  = input_name,
3564 		.mode  = PERF_DATA_MODE_READ,
3565 		.force = trace->force,
3566 	};
3567 	struct perf_session *session;
3568 	struct evsel *evsel;
3569 	int err = -1;
3570 
3571 	trace->tool.sample	  = trace__process_sample;
3572 	trace->tool.mmap	  = perf_event__process_mmap;
3573 	trace->tool.mmap2	  = perf_event__process_mmap2;
3574 	trace->tool.comm	  = perf_event__process_comm;
3575 	trace->tool.exit	  = perf_event__process_exit;
3576 	trace->tool.fork	  = perf_event__process_fork;
3577 	trace->tool.attr	  = perf_event__process_attr;
3578 	trace->tool.tracing_data  = perf_event__process_tracing_data;
3579 	trace->tool.build_id	  = perf_event__process_build_id;
3580 	trace->tool.namespaces	  = perf_event__process_namespaces;
3581 
3582 	trace->tool.ordered_events = true;
3583 	trace->tool.ordering_requires_timestamps = true;
3584 
3585 	/* add tid to output */
3586 	trace->multiple_threads = true;
3587 
3588 	session = perf_session__new(&data, false, &trace->tool);
3589 	if (IS_ERR(session))
3590 		return PTR_ERR(session);
3591 
3592 	if (trace->opts.target.pid)
3593 		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
3594 
3595 	if (trace->opts.target.tid)
3596 		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
3597 
3598 	if (symbol__init(&session->header.env) < 0)
3599 		goto out;
3600 
3601 	trace->host = &session->machines.host;
3602 
3603 	err = perf_session__set_tracepoints_handlers(session, handlers);
3604 	if (err)
3605 		goto out;
3606 
3607 	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3608 						     "raw_syscalls:sys_enter");
3609 	/* older kernels have syscalls tp versus raw_syscalls */
3610 	if (evsel == NULL)
3611 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3612 							     "syscalls:sys_enter");
3613 
3614 	if (evsel &&
3615 	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
3616 	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
3617 		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
3618 		goto out;
3619 	}
3620 
3621 	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3622 						     "raw_syscalls:sys_exit");
3623 	if (evsel == NULL)
3624 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3625 							     "syscalls:sys_exit");
3626 	if (evsel &&
3627 	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
3628 	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
3629 		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
3630 		goto out;
3631 	}
3632 
3633 	evlist__for_each_entry(session->evlist, evsel) {
3634 		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
3635 		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
3636 		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
3637 		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
3638 			evsel->handler = trace__pgfault;
3639 	}
3640 
3641 	setup_pager();
3642 
3643 	err = perf_session__process_events(session);
3644 	if (err)
3645 		pr_err("Failed to process events, error %d", err);
3646 
3647 	else if (trace->summary)
3648 		trace__fprintf_thread_summary(trace, trace->output);
3649 
3650 out:
3651 	perf_session__delete(session);
3652 
3653 	return err;
3654 }
3655 
3656 static size_t trace__fprintf_threads_header(FILE *fp)
3657 {
3658 	size_t printed;
3659 
3660 	printed  = fprintf(fp, "\n Summary of events:\n\n");
3661 
3662 	return printed;
3663 }
3664 
3665 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
3666 	struct stats 	*stats;
3667 	double		msecs;
3668 	int		syscall;
3669 )
3670 {
3671 	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
3672 	struct stats *stats = source->priv;
3673 
3674 	entry->syscall = source->i;
3675 	entry->stats   = stats;
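	/* total time for this syscall: number of calls times the average duration, in msecs */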
3676 	entry->msecs   = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
3677 }
3678 
3679 static size_t thread__dump_stats(struct thread_trace *ttrace,
3680 				 struct trace *trace, FILE *fp)
3681 {
3682 	size_t printed = 0;
3683 	struct syscall *sc;
3684 	struct rb_node *nd;
3685 	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
3686 
3687 	if (syscall_stats == NULL)
3688 		return 0;
3689 
3690 	printed += fprintf(fp, "\n");
3691 
3692 	printed += fprintf(fp, "   syscall            calls    total       min       avg       max      stddev\n");
3693 	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
3694 	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");
3695 
3696 	resort_rb__for_each_entry(nd, syscall_stats) {
3697 		struct stats *stats = syscall_stats_entry->stats;
3698 		if (stats) {
3699 			double min = (double)(stats->min) / NSEC_PER_MSEC;
3700 			double max = (double)(stats->max) / NSEC_PER_MSEC;
3701 			double avg = avg_stats(stats);
3702 			double pct;
3703 			u64 n = (u64) stats->n;
3704 
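			/* stddev column: spread relative to the average, as a percentage */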
3705 			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
3706 			avg /= NSEC_PER_MSEC;
3707 
3708 			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
3709 			printed += fprintf(fp, "   %-15s", sc->name);
3710 			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
3711 					   n, syscall_stats_entry->msecs, min, avg);
3712 			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
3713 		}
3714 	}
3715 
3716 	resort_rb__delete(syscall_stats);
3717 	printed += fprintf(fp, "\n\n");
3718 
3719 	return printed;
3720 }
3721 
3722 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
3723 {
3724 	size_t printed = 0;
3725 	struct thread_trace *ttrace = thread__priv(thread);
3726 	double ratio;
3727 
3728 	if (ttrace == NULL)
3729 		return 0;
3730 
3731 	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
3732 
3733 	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
3734 	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
3735 	printed += fprintf(fp, "%.1f%%", ratio);
3736 	if (ttrace->pfmaj)
3737 		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
3738 	if (ttrace->pfmin)
3739 		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
3740 	if (trace->sched)
3741 		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
3742 	else if (fputc('\n', fp) != EOF)
3743 		++printed;
3744 
3745 	printed += thread__dump_stats(ttrace, trace, fp);
3746 
3747 	return printed;
3748 }
3749 
3750 static unsigned long thread__nr_events(struct thread_trace *ttrace)
3751 {
3752 	return ttrace ? ttrace->nr_events : 0;
3753 }
3754 
3755 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
3756 	struct thread *thread;
3757 )
3758 {
3759 	entry->thread = rb_entry(nd, struct thread, rb_node);
3760 }
3761 
3762 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
3763 {
3764 	size_t printed = trace__fprintf_threads_header(fp);
3765 	struct rb_node *nd;
3766 	int i;
3767 
3768 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3769 		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
3770 
3771 		if (threads == NULL) {
3772 			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
3773 			return 0;
3774 		}
3775 
3776 		resort_rb__for_each_entry(nd, threads)
3777 			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
3778 
3779 		resort_rb__delete(threads);
3780 	}
3781 	return printed;
3782 }
3783 
3784 static int trace__set_duration(const struct option *opt, const char *str,
3785 			       int unset __maybe_unused)
3786 {
3787 	struct trace *trace = opt->value;
3788 
3789 	trace->duration_filter = atof(str);
3790 	return 0;
3791 }
3792 
3793 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
3794 					      int unset __maybe_unused)
3795 {
3796 	int ret = -1;
3797 	size_t i;
3798 	struct trace *trace = opt->value;
3799 	/*
3800 	 * FIXME: introduce an intarray class, parse the csv directly and create a
3801 	 * { int nr, int entries[] } struct...
3802 	 */
3803 	struct intlist *list = intlist__new(str);
3804 
3805 	if (list == NULL)
3806 		return -1;
3807 
3808 	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
3809 	trace->filter_pids.entries = calloc(i, sizeof(pid_t));
3810 
3811 	if (trace->filter_pids.entries == NULL)
3812 		goto out;
3813 
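	/* slot 0 always holds our own pid, so 'perf trace' filters out its own events */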
3814 	trace->filter_pids.entries[0] = getpid();
3815 
3816 	for (i = 1; i < trace->filter_pids.nr; ++i)
3817 		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
3818 
3819 	intlist__delete(list);
3820 	ret = 0;
3821 out:
3822 	return ret;
3823 }
3824 
3825 static int trace__open_output(struct trace *trace, const char *filename)
3826 {
3827 	struct stat st;
3828 
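	/* if the output file already exists and is non-empty, keep the old contents as "<filename>.old" */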
3829 	if (!stat(filename, &st) && st.st_size) {
3830 		char oldname[PATH_MAX];
3831 
3832 		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
3833 		unlink(oldname);
3834 		rename(filename, oldname);
3835 	}
3836 
3837 	trace->output = fopen(filename, "w");
3838 
3839 	return trace->output == NULL ? -errno : 0;
3840 }
3841 
3842 static int parse_pagefaults(const struct option *opt, const char *str,
3843 			    int unset __maybe_unused)
3844 {
3845 	int *trace_pgfaults = opt->value;
3846 
3847 	if (strcmp(str, "all") == 0)
3848 		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
3849 	else if (strcmp(str, "maj") == 0)
3850 		*trace_pgfaults |= TRACE_PFMAJ;
3851 	else if (strcmp(str, "min") == 0)
3852 		*trace_pgfaults |= TRACE_PFMIN;
3853 	else
3854 		return -1;
3855 
3856 	return 0;
3857 }
3858 
3859 static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
3860 {
3861 	struct evsel *evsel;
3862 
3863 	evlist__for_each_entry(evlist, evsel)
3864 		evsel->handler = handler;
3865 }
3866 
3867 static int evlist__set_syscall_tp_fields(struct evlist *evlist)
3868 {
3869 	struct evsel *evsel;
3870 
3871 	evlist__for_each_entry(evlist, evsel) {
3872 		if (evsel->priv || !evsel->tp_format)
3873 			continue;
3874 
3875 		if (strcmp(evsel->tp_format->system, "syscalls"))
3876 			continue;
3877 
3878 		if (perf_evsel__init_syscall_tp(evsel))
3879 			return -1;
3880 
3881 		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
3882 			struct syscall_tp *sc = evsel->priv;
3883 
3884 			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
3885 				return -1;
3886 		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
3887 			struct syscall_tp *sc = evsel->priv;
3888 
3889 			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
3890 				return -1;
3891 		}
3892 	}
3893 
3894 	return 0;
3895 }
3896 
3897 /*
3898  * XXX: Hackish, just splitting the combined -e/--event list into syscalls
3899  * (raw_syscalls:sys_{enter,exit}) and other events (tracepoints, HW, SW, etc) to use
3900  * existing facilities unchanged (trace->ev_qualifier + parse_options()).
3901  *
3902  * It'd be better to introduce a parse_options() variant that would return a
3903  * list with the terms it didn't match to an event...
3904  */
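/*
 * For instance (illustrative only): 'perf trace -e open*,sched:sched_switch'
 * ends up with "open*" in the syscall qualifier list (lists[1] below) and
 * hands "sched:sched_switch" to parse_events_option() via lists[0].
 */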
3905 static int trace__parse_events_option(const struct option *opt, const char *str,
3906 				      int unset __maybe_unused)
3907 {
3908 	struct trace *trace = (struct trace *)opt->value;
3909 	const char *s = str;
3910 	char *sep = NULL, *lists[2] = { NULL, NULL, };
3911 	int len = strlen(str) + 1, err = -1, list, idx;
3912 	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
3913 	char group_name[PATH_MAX];
3914 	struct syscall_fmt *fmt;
3915 
3916 	if (strace_groups_dir == NULL)
3917 		return -1;
3918 
3919 	if (*s == '!') {
3920 		++s;
3921 		trace->not_ev_qualifier = true;
3922 	}
3923 
3924 	while (1) {
3925 		if ((sep = strchr(s, ',')) != NULL)
3926 			*sep = '\0';
3927 
3928 		list = 0;
3929 		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
3930 		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
3931 			list = 1;
3932 			goto do_concat;
3933 		}
3934 
3935 		fmt = syscall_fmt__find_by_alias(s);
3936 		if (fmt != NULL) {
3937 			list = 1;
3938 			s = fmt->name;
3939 		} else {
3940 			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
3941 			if (access(group_name, R_OK) == 0)
3942 				list = 1;
3943 		}
3944 do_concat:
3945 		if (lists[list]) {
3946 			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
3947 		} else {
3948 			lists[list] = malloc(len);
3949 			if (lists[list] == NULL)
3950 				goto out;
3951 			strcpy(lists[list], s);
3952 		}
3953 
3954 		if (!sep)
3955 			break;
3956 
3957 		*sep = ',';
3958 		s = sep + 1;
3959 	}
3960 
3961 	if (lists[1] != NULL) {
3962 		struct strlist_config slist_config = {
3963 			.dirname = strace_groups_dir,
3964 		};
3965 
3966 		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
3967 		if (trace->ev_qualifier == NULL) {
3968 			fputs("Not enough memory to parse event qualifier", trace->output);
3969 			goto out;
3970 		}
3971 
3972 		if (trace__validate_ev_qualifier(trace))
3973 			goto out;
3974 		trace->trace_syscalls = true;
3975 	}
3976 
3977 	err = 0;
3978 
3979 	if (lists[0]) {
3980 		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
3981 					       "event selector. use 'perf list' to list available events",
3982 					       parse_events_option);
3983 		err = parse_events_option(&o, lists[0], 0);
3984 	}
3985 out:
3986 	if (sep)
3987 		*sep = ',';
3988 
3989 	return err;
3990 }
3991 
3992 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
3993 {
3994 	struct trace *trace = opt->value;
3995 
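	/* if events were already parsed, -G applies just to them; otherwise it becomes the default cgroup */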
3996 	if (!list_empty(&trace->evlist->core.entries))
3997 		return parse_cgroups(opt, str, unset);
3998 
3999 	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4000 
4001 	return 0;
4002 }
4003 
4004 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
4005 {
4006 	if (trace->bpf_obj == NULL)
4007 		return NULL;
4008 
4009 	return bpf_object__find_map_by_name(trace->bpf_obj, name);
4010 }
4011 
4012 static void trace__set_bpf_map_filtered_pids(struct trace *trace)
4013 {
4014 	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
4015 }
4016 
4017 static void trace__set_bpf_map_syscalls(struct trace *trace)
4018 {
4019 	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
4020 	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
4021 	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
4022 }
4023 
4024 static int trace__config(const char *var, const char *value, void *arg)
4025 {
4026 	struct trace *trace = arg;
4027 	int err = 0;
4028 
4029 	if (!strcmp(var, "trace.add_events")) {
4030 		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
4031 					       "event selector. use 'perf list' to list available events",
4032 					       parse_events_option);
4033 		/*
4034 		 * We can't propagate parse_events_option()'s return value, as it is 1
4035 		 * for failure while perf_config() expects -1.
4036 		 */
4037 		if (parse_events_option(&o, value, 0))
4038 			err = -1;
4039 	} else if (!strcmp(var, "trace.show_timestamp")) {
4040 		trace->show_tstamp = perf_config_bool(var, value);
4041 	} else if (!strcmp(var, "trace.show_duration")) {
4042 		trace->show_duration = perf_config_bool(var, value);
4043 	} else if (!strcmp(var, "trace.show_arg_names")) {
4044 		trace->show_arg_names = perf_config_bool(var, value);
4045 		if (!trace->show_arg_names)
4046 			trace->show_zeros = true;
4047 	} else if (!strcmp(var, "trace.show_zeros")) {
4048 		bool new_show_zeros = perf_config_bool(var, value);
4049 		if (!trace->show_arg_names && !new_show_zeros) {
4050 			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4051 			goto out;
4052 		}
4053 		trace->show_zeros = new_show_zeros;
4054 	} else if (!strcmp(var, "trace.show_prefix")) {
4055 		trace->show_string_prefix = perf_config_bool(var, value);
4056 	} else if (!strcmp(var, "trace.no_inherit")) {
4057 		trace->opts.no_inherit = perf_config_bool(var, value);
4058 	} else if (!strcmp(var, "trace.args_alignment")) {
4059 		int args_alignment = 0;
4060 		if (perf_config_int(&args_alignment, var, value) == 0)
4061 			trace->args_alignment = args_alignment;
4062 	}
4063 out:
4064 	return err;
4065 }
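
/*
 * Illustrative only: a ~/.perfconfig fragment exercising the knobs handled in
 * trace__config() above could look like:
 *
 *	[trace]
 *		add_events = sched:sched_switch
 *		show_duration = no
 *		args_alignment = 40
 */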
4066 
4067 int cmd_trace(int argc, const char **argv)
4068 {
4069 	const char *trace_usage[] = {
4070 		"perf trace [<options>] [<command>]",
4071 		"perf trace [<options>] -- <command> [<options>]",
4072 		"perf trace record [<options>] [<command>]",
4073 		"perf trace record [<options>] -- <command> [<options>]",
4074 		NULL
4075 	};
4076 	struct trace trace = {
4077 		.opts = {
4078 			.target = {
4079 				.uid	   = UINT_MAX,
4080 				.uses_mmap = true,
4081 			},
4082 			.user_freq     = UINT_MAX,
4083 			.user_interval = ULLONG_MAX,
4084 			.no_buffering  = true,
4085 			.mmap_pages    = UINT_MAX,
4086 		},
4087 		.output = stderr,
4088 		.show_comm = true,
4089 		.show_tstamp = true,
4090 		.show_duration = true,
4091 		.show_arg_names = true,
4092 		.args_alignment = 70,
4093 		.trace_syscalls = false,
4094 		.kernel_syscallchains = false,
4095 		.max_stack = UINT_MAX,
4096 		.max_events = ULONG_MAX,
4097 	};
4098 	const char *map_dump_str = NULL;
4099 	const char *output_name = NULL;
4100 	const struct option trace_options[] = {
4101 	OPT_CALLBACK('e', "event", &trace, "event",
4102 		     "event/syscall selector. use 'perf list' to list available events",
4103 		     trace__parse_events_option),
4104 	OPT_BOOLEAN(0, "comm", &trace.show_comm,
4105 		    "show the thread COMM next to its id"),
4106 	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4107 	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4108 		     trace__parse_events_option),
4109 	OPT_STRING('o', "output", &output_name, "file", "output file name"),
4110 	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
4111 	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4112 		    "trace events on existing process id"),
4113 	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4114 		    "trace events on existing thread id"),
4115 	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4116 		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
4117 	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4118 		    "system-wide collection from all CPUs"),
4119 	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4120 		    "list of cpus to monitor"),
4121 	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4122 		    "child tasks do not inherit counters"),
4123 	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4124 		     "number of mmap data pages",
4125 		     perf_evlist__parse_mmap_pages),
4126 	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4127 		   "user to profile"),
4128 	OPT_CALLBACK(0, "duration", &trace, "float",
4129 		     "show only events with duration > N.M ms",
4130 		     trace__set_duration),
4131 #ifdef HAVE_LIBBPF_SUPPORT
4132 	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
4133 #endif
4134 	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4135 	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
4136 	OPT_BOOLEAN('T', "time", &trace.full_time,
4137 		    "Show full timestamp, not time relative to first start"),
4138 	OPT_BOOLEAN(0, "failure", &trace.failure_only,
4139 		    "Show only syscalls that failed"),
4140 	OPT_BOOLEAN('s', "summary", &trace.summary_only,
4141 		    "Show only syscall summary with statistics"),
4142 	OPT_BOOLEAN('S', "with-summary", &trace.summary,
4143 		    "Show all syscalls and summary with statistics"),
4144 	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4145 		     "Trace pagefaults", parse_pagefaults, "maj"),
4146 	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4147 	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4148 	OPT_CALLBACK(0, "call-graph", &trace.opts,
4149 		     "record_mode[,record_size]", record_callchain_help,
4150 		     &record_parse_callchain_opt),
4151 	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4152 		    "Show the kernel callchains on the syscall exit path"),
4153 	OPT_ULONG(0, "max-events", &trace.max_events,
4154 		"Set the maximum number of events to print, exit after that is reached. "),
4155 	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4156 		     "Set the minimum stack depth when parsing the callchain, "
4157 		     "anything below the specified depth will be ignored."),
4158 	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4159 		     "Set the maximum stack depth when parsing the callchain, "
4160 		     "anything beyond the specified depth will be ignored. "
4161 		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4162 	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4163 			"Sort batch of events before processing, use if getting out of order events"),
4164 	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4165 			"print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
4166 	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4167 			"per thread proc mmap processing timeout in ms"),
4168 	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4169 		     trace__parse_cgroups),
4170 	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
4171 		     "ms to wait before starting measurement after program "
4172 		     "start"),
4173 	OPTS_EVSWITCH(&trace.evswitch),
4174 	OPT_END()
4175 	};
4176 	bool __maybe_unused max_stack_user_set = true;
4177 	bool mmap_pages_user_set = true;
4178 	struct evsel *evsel;
4179 	const char * const trace_subcommands[] = { "record", NULL };
4180 	int err = -1;
4181 	char bf[BUFSIZ];
4182 
4183 	signal(SIGSEGV, sighandler_dump_stack);
4184 	signal(SIGFPE, sighandler_dump_stack);
4185 
4186 	trace.evlist = evlist__new();
4187 	trace.sctbl = syscalltbl__new();
4188 
4189 	if (trace.evlist == NULL || trace.sctbl == NULL) {
4190 		pr_err("Not enough memory to run!\n");
4191 		err = -ENOMEM;
4192 		goto out;
4193 	}
4194 
4195 	/*
4196 	 * Parsing .perfconfig may entail creating a BPF event, that may need
4197 	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
4198 	 * is too small. This affects just this process, not touching the
4199 	 * global setting. If it fails we'll get something in 'perf trace -v'
4200 	 * to help diagnose the problem.
4201 	 */
4202 	rlimit__bump_memlock();
4203 
4204 	err = perf_config(trace__config, &trace);
4205 	if (err)
4206 		goto out;
4207 
4208 	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
4209 				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
4210 
4211 	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4212 		usage_with_options_msg(trace_usage, trace_options,
4213 				       "cgroup monitoring only available in system-wide mode");
4214 	}
4215 
4216 	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4217 	if (IS_ERR(evsel)) {
4218 		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4219 		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4220 		goto out;
4221 	}
4222 
4223 	if (evsel) {
4224 		trace.syscalls.events.augmented = evsel;
4225 
4226 		evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4227 		if (evsel == NULL) {
4228 			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
4229 			goto out;
4230 		}
4231 
4232 		if (evsel->bpf_obj == NULL) {
4233 			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
4234 			goto out;
4235 		}
4236 
4237 		trace.bpf_obj = evsel->bpf_obj;
4238 
4239 		trace__set_bpf_map_filtered_pids(&trace);
4240 		trace__set_bpf_map_syscalls(&trace);
4241 		trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
4242 	}
4243 
4244 	err = bpf__setup_stdout(trace.evlist);
4245 	if (err) {
4246 		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
4247 		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
4248 		goto out;
4249 	}
4250 
4251 	err = -1;
4252 
4253 	if (map_dump_str) {
4254 		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
4255 		if (trace.dump.map == NULL) {
4256 			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
4257 			goto out;
4258 		}
4259 	}
4260 
4261 	if (trace.trace_pgfaults) {
4262 		trace.opts.sample_address = true;
4263 		trace.opts.sample_time = true;
4264 	}
4265 
4266 	if (trace.opts.mmap_pages == UINT_MAX)
4267 		mmap_pages_user_set = false;
4268 
4269 	if (trace.max_stack == UINT_MAX) {
4270 		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
4271 		max_stack_user_set = false;
4272 	}
4273 
4274 #ifdef HAVE_DWARF_UNWIND_SUPPORT
4275 	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
4276 		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
4277 	}
4278 #endif
4279 
4280 	if (callchain_param.enabled) {
4281 		if (!mmap_pages_user_set && geteuid() == 0)
4282 			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
4283 
4284 		symbol_conf.use_callchain = true;
4285 	}
4286 
4287 	if (trace.evlist->core.nr_entries > 0) {
4288 		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
4289 		if (evlist__set_syscall_tp_fields(trace.evlist)) {
4290 			perror("failed to set syscalls:* tracepoint fields");
4291 			goto out;
4292 		}
4293 	}
4294 
4295 	if (trace.sort_events) {
4296 		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
4297 		ordered_events__set_copy_on_queue(&trace.oe.data, true);
4298 	}
4299 
4300 	/*
4301 	 * If we are augmenting syscalls, then combine what we put in the
4302 	 * __augmented_syscalls__ BPF map with what is in the
4303 	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
4304 	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
4305 	 *
4306 	 * We'll switch to look at two BPF maps, one for sys_enter and the
4307 	 * other for sys_exit when we start augmenting the sys_exit paths with
4308 	 * buffers that are being copied from kernel to userspace, think 'read'
4309 	 * syscall.
4310 	 */
4311 	if (trace.syscalls.events.augmented) {
4312 		evlist__for_each_entry(trace.evlist, evsel) {
4313 			bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
4314 
4315 			if (raw_syscalls_sys_exit) {
4316 				trace.raw_augmented_syscalls = true;
4317 				goto init_augmented_syscall_tp;
4318 			}
4319 
4320 			if (trace.syscalls.events.augmented->priv == NULL &&
4321 			    strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
4322 				struct evsel *augmented = trace.syscalls.events.augmented;
4323 				if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
4324 				    perf_evsel__init_augmented_syscall_tp_args(augmented))
4325 					goto out;
4326 				/*
4327 				 * Augmented is the __augmented_syscalls__ BPF_OUTPUT event.
4328 				 * Above we made sure we can get from the payload the tp fields
4329 				 * that we get from syscalls:sys_enter tracefs format file.
4330 				 */
4331 				augmented->handler = trace__sys_enter;
4332 				/*
4333 				 * Now we do the same for the *syscalls:sys_enter event so that
4334 				 * if we handle it directly, i.e. if the BPF prog returns 0 so
4335 				 * as not to filter it, then we'll handle it just like we would
4336 				 * for the BPF_OUTPUT one:
4337 				 */
4338 				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
4339 				    perf_evsel__init_augmented_syscall_tp_args(evsel))
4340 					goto out;
4341 				evsel->handler = trace__sys_enter;
4342 			}
4343 
4344 			if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
4345 				struct syscall_tp *sc;
4346 init_augmented_syscall_tp:
4347 				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
4348 					goto out;
4349 				sc = evsel->priv;
4350 				/*
4351 				 * For now with BPF raw_augmented we hook into
4352 				 * raw_syscalls:sys_enter and there we get all
4353 				 * 6 syscall args plus the tracepoint common
4354 				 * fields and the syscall_nr (another long).
4355 				 * So we check if that is the case and if so
4356 				 * don't limit ourselves to sc->args_size, but
4357 				 * always use the full raw_syscalls:sys_enter
4358 				 * payload size, which is fixed.
4359 				 *
4360 				 * We'll revisit this later to pass
4361 				 * sc->args_size to the BPF augmenter (now
4362 				 * tools/perf/examples/bpf/augmented_raw_syscalls.c),
4363 				 * so that it copies only what we need for each
4364 				 * syscall, like what happens when we use
4365 				 * syscalls:sys_enter_NAME, so that we reduce
4366 				 * the kernel/userspace traffic to just what is
4367 				 * needed for each syscall.
4368 				 */
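				/*
				 * E.g. with 8-byte longs (arch dependent, an assumption) that is
				 * (6 + 1) * 8 = 56 bytes of args plus syscall_nr on top of the
				 * sc->id.offset bytes of common fields.
				 */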
4369 				if (trace.raw_augmented_syscalls)
4370 					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
4371 				perf_evsel__init_augmented_syscall_tp_ret(evsel);
4372 				evsel->handler = trace__sys_exit;
4373 			}
4374 		}
4375 	}
4376 
4377 	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
4378 		return trace__record(&trace, argc-1, &argv[1]);
4379 
4380 	/* summary_only implies summary option, but don't overwrite summary if set */
4381 	if (trace.summary_only)
4382 		trace.summary = trace.summary_only;
4383 
4384 	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4385 	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4386 		trace.trace_syscalls = true;
4387 	}
4388 
4389 	if (output_name != NULL) {
4390 		err = trace__open_output(&trace, output_name);
4391 		if (err < 0) {
4392 			perror("failed to create output file");
4393 			goto out;
4394 		}
4395 	}
4396 
4397 	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
4398 	if (err)
4399 		goto out_close;
4400 
4401 	err = target__validate(&trace.opts.target);
4402 	if (err) {
4403 		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
4404 		fprintf(trace.output, "%s", bf);
4405 		goto out_close;
4406 	}
4407 
4408 	err = target__parse_uid(&trace.opts.target);
4409 	if (err) {
4410 		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
4411 		fprintf(trace.output, "%s", bf);
4412 		goto out_close;
4413 	}
4414 
4415 	if (!argc && target__none(&trace.opts.target))
4416 		trace.opts.target.system_wide = true;
4417 
4418 	if (input_name)
4419 		err = trace__replay(&trace);
4420 	else
4421 		err = trace__run(&trace, argc, argv);
4422 
4423 out_close:
4424 	if (output_name != NULL)
4425 		fclose(trace.output);
4426 out:
4427 	return err;
4428 }
4429