xref: /linux/tools/perf/builtin-trace.c (revision d6e4b3e326d8b44675b9e19534347d97073826aa)
1 /*
2  * builtin-trace.c
3  *
4  * Builtin 'trace' command:
5  *
6  * Display a continuously updated trace of any workload, CPU, specific PID,
7  * system wide, etc.  Default format is loosely strace-like, but any other
8  * event may be specified using --event.
9  *
10  * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11  *
12  * Initially based on the 'trace' prototype by Thomas Gleixner:
13  *
14  * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15  *
16  * Released under the GPL v2. (and only v2, not any later version)
17  */
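/*
 * A few illustrative invocations (not exhaustive, see the perf-trace man page;
 * the exact option spellings below are the commonly documented ones):
 *
 *	perf trace ls				# trace the syscalls of a workload
 *	perf trace -p 1234			# trace an existing PID
 *	perf trace --event sched:sched_switch -p 1234	# mix in other events
 */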
18 
19 #include <traceevent/event-parse.h>
20 #include <api/fs/tracing_path.h>
21 #include <bpf/bpf.h>
22 #include "builtin.h"
23 #include "util/cgroup.h"
24 #include "util/color.h"
25 #include "util/config.h"
26 #include "util/debug.h"
27 #include "util/env.h"
28 #include "util/event.h"
29 #include "util/evlist.h"
30 #include <subcmd/exec-cmd.h>
31 #include "util/machine.h"
32 #include "util/path.h"
33 #include "util/session.h"
34 #include "util/thread.h"
35 #include <subcmd/parse-options.h>
36 #include "util/strlist.h"
37 #include "util/intlist.h"
38 #include "util/thread_map.h"
39 #include "util/stat.h"
40 #include "trace/beauty/beauty.h"
41 #include "trace-event.h"
42 #include "util/parse-events.h"
43 #include "util/bpf-loader.h"
44 #include "callchain.h"
45 #include "print_binary.h"
46 #include "string2.h"
47 #include "syscalltbl.h"
48 #include "rb_resort.h"
49 
50 #include <errno.h>
51 #include <inttypes.h>
52 #include <poll.h>
53 #include <signal.h>
54 #include <stdlib.h>
55 #include <string.h>
56 #include <linux/err.h>
57 #include <linux/filter.h>
58 #include <linux/kernel.h>
59 #include <linux/random.h>
60 #include <linux/stringify.h>
61 #include <linux/time64.h>
62 #include <fcntl.h>
63 
64 #include "sane_ctype.h"
65 
66 #ifndef O_CLOEXEC
67 # define O_CLOEXEC		02000000
68 #endif
69 
70 #ifndef F_LINUX_SPECIFIC_BASE
71 # define F_LINUX_SPECIFIC_BASE	1024
72 #endif
73 
74 struct trace {
75 	struct perf_tool	tool;
76 	struct syscalltbl	*sctbl;
77 	struct {
78 		int		max;
79 		struct syscall  *table;
80 		struct bpf_map  *map;
81 		struct {
82 			struct perf_evsel *sys_enter,
83 					  *sys_exit,
84 					  *augmented;
85 		}		events;
86 	} syscalls;
87 	struct record_opts	opts;
88 	struct perf_evlist	*evlist;
89 	struct machine		*host;
90 	struct thread		*current;
91 	struct cgroup		*cgroup;
92 	u64			base_time;
93 	FILE			*output;
94 	unsigned long		nr_events;
95 	unsigned long		nr_events_printed;
96 	unsigned long		max_events;
97 	struct strlist		*ev_qualifier;
98 	struct {
99 		size_t		nr;
100 		int		*entries;
101 	}			ev_qualifier_ids;
102 	struct {
103 		size_t		nr;
104 		pid_t		*entries;
105 		struct bpf_map  *map;
106 	}			filter_pids;
107 	double			duration_filter;
108 	double			runtime_ms;
109 	struct {
110 		u64		vfs_getname,
111 				proc_getname;
112 	} stats;
113 	unsigned int		max_stack;
114 	unsigned int		min_stack;
115 	bool			sort_events;
116 	bool			raw_augmented_syscalls;
117 	bool			not_ev_qualifier;
118 	bool			live;
119 	bool			full_time;
120 	bool			sched;
121 	bool			multiple_threads;
122 	bool			summary;
123 	bool			summary_only;
124 	bool			failure_only;
125 	bool			show_comm;
126 	bool			print_sample;
127 	bool			show_tool_stats;
128 	bool			trace_syscalls;
129 	bool			kernel_syscallchains;
130 	s16			args_alignment;
131 	bool			show_tstamp;
132 	bool			show_duration;
133 	bool			show_zeros;
134 	bool			show_arg_names;
135 	bool			show_string_prefix;
136 	bool			force;
137 	bool			vfs_getname;
138 	int			trace_pgfaults;
139 	struct {
140 		struct ordered_events	data;
141 		u64			last;
142 	} oe;
143 };
144 
145 struct tp_field {
146 	int offset;
147 	union {
148 		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
149 		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
150 	};
151 };
152 
153 #define TP_UINT_FIELD(bits) \
154 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
155 { \
156 	u##bits value; \
157 	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
158 	return value;  \
159 }
160 
161 TP_UINT_FIELD(8);
162 TP_UINT_FIELD(16);
163 TP_UINT_FIELD(32);
164 TP_UINT_FIELD(64);
165 
166 #define TP_UINT_FIELD__SWAPPED(bits) \
167 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
168 { \
169 	u##bits value; \
170 	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
171 	return bswap_##bits(value);\
172 }
173 
174 TP_UINT_FIELD__SWAPPED(16);
175 TP_UINT_FIELD__SWAPPED(32);
176 TP_UINT_FIELD__SWAPPED(64);
177 
178 static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
179 {
180 	field->offset = offset;
181 
182 	switch (size) {
183 	case 1:
184 		field->integer = tp_field__u8;
185 		break;
186 	case 2:
187 		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
188 		break;
189 	case 4:
190 		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
191 		break;
192 	case 8:
193 		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
194 		break;
195 	default:
196 		return -1;
197 	}
198 
199 	return 0;
200 }
201 
202 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
203 {
204 	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
205 }
206 
207 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
208 {
209 	return sample->raw_data + field->offset;
210 }
211 
212 static int __tp_field__init_ptr(struct tp_field *field, int offset)
213 {
214 	field->offset = offset;
215 	field->pointer = tp_field__ptr;
216 	return 0;
217 }
218 
219 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
220 {
221 	return __tp_field__init_ptr(field, format_field->offset);
222 }
223 
224 struct syscall_tp {
225 	struct tp_field id;
226 	union {
227 		struct tp_field args, ret;
228 	};
229 };
230 
231 static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
232 					  struct tp_field *field,
233 					  const char *name)
234 {
235 	struct tep_format_field *format_field = perf_evsel__field(evsel, name);
236 
237 	if (format_field == NULL)
238 		return -1;
239 
240 	return tp_field__init_uint(field, format_field, evsel->needs_swap);
241 }
242 
243 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
244 	({ struct syscall_tp *sc = evsel->priv;\
245 	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
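/*
 * E.g. perf_evsel__init_sc_tp_uint_field(evsel, id), as used further down,
 * expands to:
 *
 *	({ struct syscall_tp *sc = evsel->priv;
 *	   perf_evsel__init_tp_uint_field(evsel, &sc->id, "id"); })
 *
 * i.e. the same token names both the struct syscall_tp member and the
 * tracepoint field looked up in the event's /format file.
 */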
246 
247 static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
248 					 struct tp_field *field,
249 					 const char *name)
250 {
251 	struct tep_format_field *format_field = perf_evsel__field(evsel, name);
252 
253 	if (format_field == NULL)
254 		return -1;
255 
256 	return tp_field__init_ptr(field, format_field);
257 }
258 
259 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
260 	({ struct syscall_tp *sc = evsel->priv;\
261 	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
262 
263 static void perf_evsel__delete_priv(struct perf_evsel *evsel)
264 {
265 	zfree(&evsel->priv);
266 	perf_evsel__delete(evsel);
267 }
268 
269 static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel)
270 {
271 	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
272 
273 	if (evsel->priv != NULL) {
274 		if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
275 		    perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
276 			goto out_delete;
277 		return 0;
278 	}
279 
280 	return -ENOMEM;
281 out_delete:
282 	zfree(&evsel->priv);
283 	return -ENOENT;
284 }
285 
286 static int perf_evsel__init_augmented_syscall_tp(struct perf_evsel *evsel)
287 {
288 	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
289 
290 	if (evsel->priv != NULL) {       /* field, sizeof_field, offsetof_field */
291 		if (__tp_field__init_uint(&sc->id, sizeof(long), sizeof(long long), evsel->needs_swap))
292 			goto out_delete;
293 
294 		return 0;
295 	}
296 
297 	return -ENOMEM;
298 out_delete:
299 	zfree(&evsel->priv);
300 	return -EINVAL;
301 }
302 
303 static int perf_evsel__init_augmented_syscall_tp_args(struct perf_evsel *evsel)
304 {
305 	struct syscall_tp *sc = evsel->priv;
306 
307 	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
308 }
309 
310 static int perf_evsel__init_augmented_syscall_tp_ret(struct perf_evsel *evsel)
311 {
312 	struct syscall_tp *sc = evsel->priv;
313 
314 	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
315 }
316 
317 static int perf_evsel__init_raw_syscall_tp(struct perf_evsel *evsel, void *handler)
318 {
319 	evsel->priv = malloc(sizeof(struct syscall_tp));
320 	if (evsel->priv != NULL) {
321 		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
322 			goto out_delete;
323 
324 		evsel->handler = handler;
325 		return 0;
326 	}
327 
328 	return -ENOMEM;
329 
330 out_delete:
331 	zfree(&evsel->priv);
332 	return -ENOENT;
333 }
334 
335 static struct perf_evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
336 {
337 	struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
338 
339 	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
340 	if (IS_ERR(evsel))
341 		evsel = perf_evsel__newtp("syscalls", direction);
342 
343 	if (IS_ERR(evsel))
344 		return NULL;
345 
346 	if (perf_evsel__init_raw_syscall_tp(evsel, handler))
347 		goto out_delete;
348 
349 	return evsel;
350 
351 out_delete:
352 	perf_evsel__delete_priv(evsel);
353 	return NULL;
354 }
355 
356 #define perf_evsel__sc_tp_uint(evsel, name, sample) \
357 	({ struct syscall_tp *fields = evsel->priv; \
358 	   fields->name.integer(&fields->name, sample); })
359 
360 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
361 	({ struct syscall_tp *fields = evsel->priv; \
362 	   fields->name.pointer(&fields->name, sample); })
363 
364 size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
365 {
366 	int idx = val - sa->offset;
367 
368 	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
369 		size_t printed = scnprintf(bf, size, intfmt, val);
370 		if (show_prefix)
371 			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
372 		return printed;
373 	}
374 
375 	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
376 }
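/*
 * For instance, with the 'whences' strarray defined below (entries
 * { "SET", "CUR", "END", ... }, prefix "SEEK_", offset 0):
 *
 *	strarray__scnprintf(sa, bf, size, "%d", true, 1)  produces "SEEK_CUR"
 *	strarray__scnprintf(sa, bf, size, "%d", false, 1) produces "CUR"
 *
 * while an out-of-range value falls back to 'intfmt' ("%d" here), followed,
 * when show_prefix is set, by a "SEEK_???" marker.
 */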
377 
378 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
379 						const char *intfmt,
380 					        struct syscall_arg *arg)
381 {
382 	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
383 }
384 
385 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
386 					      struct syscall_arg *arg)
387 {
388 	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
389 }
390 
391 #define SCA_STRARRAY syscall_arg__scnprintf_strarray
392 
393 size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
394 {
395 	size_t printed;
396 	int i;
397 
398 	for (i = 0; i < sas->nr_entries; ++i) {
399 		struct strarray *sa = sas->entries[i];
400 		int idx = val - sa->offset;
401 
402 		if (idx >= 0 && idx < sa->nr_entries) {
403 			if (sa->entries[idx] == NULL)
404 				break;
405 			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
406 		}
407 	}
408 
409 	printed = scnprintf(bf, size, intfmt, val);
410 	if (show_prefix)
411 		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
412 	return printed;
413 }
414 
415 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
416 					struct syscall_arg *arg)
417 {
418 	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
419 }
420 
421 #ifndef AT_FDCWD
422 #define AT_FDCWD	-100
423 #endif
424 
425 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
426 					   struct syscall_arg *arg)
427 {
428 	int fd = arg->val;
429 	const char *prefix = "AT_FD";
430 
431 	if (fd == AT_FDCWD)
432 		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
433 
434 	return syscall_arg__scnprintf_fd(bf, size, arg);
435 }
436 
437 #define SCA_FDAT syscall_arg__scnprintf_fd_at
438 
439 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
440 					      struct syscall_arg *arg);
441 
442 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
443 
444 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
445 {
446 	return scnprintf(bf, size, "%#lx", arg->val);
447 }
448 
449 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
450 {
451 	if (arg->val == 0)
452 		return scnprintf(bf, size, "NULL");
453 	return syscall_arg__scnprintf_hex(bf, size, arg);
454 }
455 
456 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
457 {
458 	return scnprintf(bf, size, "%d", arg->val);
459 }
460 
461 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
462 {
463 	return scnprintf(bf, size, "%ld", arg->val);
464 }
465 
466 static const char *bpf_cmd[] = {
467 	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
468 	"MAP_GET_NEXT_KEY", "PROG_LOAD",
469 };
470 static DEFINE_STRARRAY(bpf_cmd, "BPF_");
471 
472 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
473 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
474 
475 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
476 static DEFINE_STRARRAY(itimers, "ITIMER_");
477 
478 static const char *keyctl_options[] = {
479 	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
480 	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
481 	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
482 	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
483 	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
484 };
485 static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
486 
487 static const char *whences[] = { "SET", "CUR", "END",
488 #ifdef SEEK_DATA
489 "DATA",
490 #endif
491 #ifdef SEEK_HOLE
492 "HOLE",
493 #endif
494 };
495 static DEFINE_STRARRAY(whences, "SEEK_");
496 
497 static const char *fcntl_cmds[] = {
498 	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
499 	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
500 	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
501 	"GETOWNER_UIDS",
502 };
503 static DEFINE_STRARRAY(fcntl_cmds, "F_");
504 
505 static const char *fcntl_linux_specific_cmds[] = {
506 	"SETLEASE", "GETLEASE", "NOTIFY", [5] =	"CANCELLK", "DUPFD_CLOEXEC",
507 	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
508 	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
509 };
510 
511 static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
512 
513 static struct strarray *fcntl_cmds_arrays[] = {
514 	&strarray__fcntl_cmds,
515 	&strarray__fcntl_linux_specific_cmds,
516 };
517 
518 static DEFINE_STRARRAYS(fcntl_cmds_arrays);
519 
520 static const char *rlimit_resources[] = {
521 	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
522 	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
523 	"RTTIME",
524 };
525 static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
526 
527 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
528 static DEFINE_STRARRAY(sighow, "SIG_");
529 
530 static const char *clockid[] = {
531 	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
532 	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
533 	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
534 };
535 static DEFINE_STRARRAY(clockid, "CLOCK_");
536 
537 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
538 						 struct syscall_arg *arg)
539 {
540 	bool show_prefix = arg->show_string_prefix;
541 	const char *suffix = "_OK";
542 	size_t printed = 0;
543 	int mode = arg->val;
544 
545 	if (mode == F_OK) /* 0 */
546 		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
547 #define	P_MODE(n) \
548 	if (mode & n##_OK) { \
549 		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
550 		mode &= ~n##_OK; \
551 	}
552 
553 	P_MODE(R);
554 	P_MODE(W);
555 	P_MODE(X);
556 #undef P_MODE
557 
558 	if (mode)
559 		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
560 
561 	return printed;
562 }
563 
564 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
565 
566 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
567 					      struct syscall_arg *arg);
568 
569 #define SCA_FILENAME syscall_arg__scnprintf_filename
570 
571 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
572 						struct syscall_arg *arg)
573 {
574 	bool show_prefix = arg->show_string_prefix;
575 	const char *prefix = "O_";
576 	int printed = 0, flags = arg->val;
577 
578 #define	P_FLAG(n) \
579 	if (flags & O_##n) { \
580 		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
581 		flags &= ~O_##n; \
582 	}
583 
584 	P_FLAG(CLOEXEC);
585 	P_FLAG(NONBLOCK);
586 #undef P_FLAG
587 
588 	if (flags)
589 		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
590 
591 	return printed;
592 }
593 
594 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
595 
596 #ifndef GRND_NONBLOCK
597 #define GRND_NONBLOCK	0x0001
598 #endif
599 #ifndef GRND_RANDOM
600 #define GRND_RANDOM	0x0002
601 #endif
602 
603 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
604 						   struct syscall_arg *arg)
605 {
606 	bool show_prefix = arg->show_string_prefix;
607 	const char *prefix = "GRND_";
608 	int printed = 0, flags = arg->val;
609 
610 #define	P_FLAG(n) \
611 	if (flags & GRND_##n) { \
612 		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
613 		flags &= ~GRND_##n; \
614 	}
615 
616 	P_FLAG(RANDOM);
617 	P_FLAG(NONBLOCK);
618 #undef P_FLAG
619 
620 	if (flags)
621 		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
622 
623 	return printed;
624 }
625 
626 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
627 
628 #define STRARRAY(name, array) \
629 	  { .scnprintf	= SCA_STRARRAY, \
630 	    .parm	= &strarray__##array, }
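/*
 * So an entry like '[0] = STRARRAY(cmd, bpf_cmd)' in syscall_fmts[] below
 * expands to:
 *
 *	[0] = { .scnprintf = SCA_STRARRAY, .parm = &strarray__bpf_cmd, }
 *
 * with strarray__bpf_cmd being the table set up by DEFINE_STRARRAY(bpf_cmd, "BPF_").
 */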
631 
632 #include "trace/beauty/arch_errno_names.c"
633 #include "trace/beauty/eventfd.c"
634 #include "trace/beauty/futex_op.c"
635 #include "trace/beauty/futex_val3.c"
636 #include "trace/beauty/mmap.c"
637 #include "trace/beauty/mode_t.c"
638 #include "trace/beauty/msg_flags.c"
639 #include "trace/beauty/open_flags.c"
640 #include "trace/beauty/perf_event_open.c"
641 #include "trace/beauty/pid.c"
642 #include "trace/beauty/sched_policy.c"
643 #include "trace/beauty/seccomp.c"
644 #include "trace/beauty/signum.c"
645 #include "trace/beauty/socket_type.c"
646 #include "trace/beauty/waitid_options.c"
647 
648 struct syscall_arg_fmt {
649 	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
650 	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
651 	void	   *parm;
652 	const char *name;
653 	bool	   show_zero;
654 };
655 
656 static struct syscall_fmt {
657 	const char *name;
658 	const char *alias;
659 	struct syscall_arg_fmt arg[6];
660 	u8	   nr_args;
661 	bool	   errpid;
662 	bool	   timeout;
663 	bool	   hexret;
664 } syscall_fmts[] = {
665 	{ .name	    = "access",
666 	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
667 	{ .name	    = "arch_prctl",
668 	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
669 		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
670 	{ .name	    = "bind",
671 	  .arg = { [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ }, }, },
672 	{ .name	    = "bpf",
673 	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
674 	{ .name	    = "brk",	    .hexret = true,
675 	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
676 	{ .name     = "clock_gettime",
677 	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
678 	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
679 	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
680 		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
681 		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
682 		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
683 		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
684 	{ .name	    = "close",
685 	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
686 	{ .name	    = "connect",
687 	  .arg = { [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ }, }, },
688 	{ .name	    = "epoll_ctl",
689 	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
690 	{ .name	    = "eventfd2",
691 	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
692 	{ .name	    = "fchmodat",
693 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
694 	{ .name	    = "fchownat",
695 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
696 	{ .name	    = "fcntl",
697 	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
698 			   .parm      = &strarrays__fcntl_cmds_arrays,
699 			   .show_zero = true, },
700 		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
701 	{ .name	    = "flock",
702 	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
703 	{ .name	    = "fstat", .alias = "newfstat", },
704 	{ .name	    = "fstatat", .alias = "newfstatat", },
705 	{ .name	    = "futex",
706 	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
707 		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
708 	{ .name	    = "futimesat",
709 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
710 	{ .name	    = "getitimer",
711 	  .arg = { [0] = STRARRAY(which, itimers), }, },
712 	{ .name	    = "getpid",	    .errpid = true, },
713 	{ .name	    = "getpgid",    .errpid = true, },
714 	{ .name	    = "getppid",    .errpid = true, },
715 	{ .name	    = "getrandom",
716 	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
717 	{ .name	    = "getrlimit",
718 	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
719 	{ .name	    = "gettid",	    .errpid = true, },
720 	{ .name	    = "ioctl",
721 	  .arg = {
722 #if defined(__i386__) || defined(__x86_64__)
723 /*
724  * FIXME: Make this available to all arches.
725  */
726 		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
727 		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
728 #else
729 		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
730 #endif
731 	{ .name	    = "kcmp",	    .nr_args = 5,
732 	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
733 		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
734 		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
735 		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
736 		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
737 	{ .name	    = "keyctl",
738 	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
739 	{ .name	    = "kill",
740 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
741 	{ .name	    = "linkat",
742 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
743 	{ .name	    = "lseek",
744 	  .arg = { [2] = STRARRAY(whence, whences), }, },
745 	{ .name	    = "lstat", .alias = "newlstat", },
746 	{ .name     = "madvise",
747 	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
748 		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
749 	{ .name	    = "mkdirat",
750 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
751 	{ .name	    = "mknodat",
752 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
753 	{ .name	    = "mmap",	    .hexret = true,
754 /* The standard mmap maps to old_mmap on s390x */
755 #if defined(__s390x__)
756 	.alias = "old_mmap",
757 #endif
758 	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
759 		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */ },
760 		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
761 	{ .name	    = "mount",
762 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
763 		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
764 			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
765 	{ .name	    = "mprotect",
766 	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
767 		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
768 	{ .name	    = "mq_unlink",
769 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
770 	{ .name	    = "mremap",	    .hexret = true,
771 	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
772 	{ .name	    = "name_to_handle_at",
773 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
774 	{ .name	    = "newfstatat",
775 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
776 	{ .name	    = "open",
777 	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
778 	{ .name	    = "open_by_handle_at",
779 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
780 		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
781 	{ .name	    = "openat",
782 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
783 		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
784 	{ .name	    = "perf_event_open",
785 	  .arg = { [2] = { .scnprintf = SCA_INT,	/* cpu */ },
786 		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
787 		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
788 	{ .name	    = "pipe2",
789 	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
790 	{ .name	    = "pkey_alloc",
791 	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
792 	{ .name	    = "pkey_free",
793 	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
794 	{ .name	    = "pkey_mprotect",
795 	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
796 		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
797 		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
798 	{ .name	    = "poll", .timeout = true, },
799 	{ .name	    = "ppoll", .timeout = true, },
800 	{ .name	    = "prctl",
801 	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
802 		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
803 		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
804 	{ .name	    = "pread", .alias = "pread64", },
805 	{ .name	    = "preadv", .alias = "pread", },
806 	{ .name	    = "prlimit64",
807 	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
808 	{ .name	    = "pwrite", .alias = "pwrite64", },
809 	{ .name	    = "readlinkat",
810 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
811 	{ .name	    = "recvfrom",
812 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
813 	{ .name	    = "recvmmsg",
814 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
815 	{ .name	    = "recvmsg",
816 	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
817 	{ .name	    = "renameat",
818 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
819 		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
820 	{ .name	    = "renameat2",
821 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
822 		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
823 		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
824 	{ .name	    = "rt_sigaction",
825 	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
826 	{ .name	    = "rt_sigprocmask",
827 	  .arg = { [0] = STRARRAY(how, sighow), }, },
828 	{ .name	    = "rt_sigqueueinfo",
829 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
830 	{ .name	    = "rt_tgsigqueueinfo",
831 	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
832 	{ .name	    = "sched_setscheduler",
833 	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
834 	{ .name	    = "seccomp",
835 	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
836 		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
837 	{ .name	    = "select", .timeout = true, },
838 	{ .name	    = "sendmmsg",
839 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
840 	{ .name	    = "sendmsg",
841 	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
842 	{ .name	    = "sendto",
843 	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
844 		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
845 	{ .name	    = "set_tid_address", .errpid = true, },
846 	{ .name	    = "setitimer",
847 	  .arg = { [0] = STRARRAY(which, itimers), }, },
848 	{ .name	    = "setrlimit",
849 	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
850 	{ .name	    = "socket",
851 	  .arg = { [0] = STRARRAY(family, socket_families),
852 		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
853 		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
854 	{ .name	    = "socketpair",
855 	  .arg = { [0] = STRARRAY(family, socket_families),
856 		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
857 		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
858 	{ .name	    = "stat", .alias = "newstat", },
859 	{ .name	    = "statx",
860 	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
861 		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
862 		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
863 	{ .name	    = "swapoff",
864 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
865 	{ .name	    = "swapon",
866 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
867 	{ .name	    = "symlinkat",
868 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
869 	{ .name	    = "tgkill",
870 	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
871 	{ .name	    = "tkill",
872 	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
873 	{ .name     = "umount2", .alias = "umount",
874 	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
875 	{ .name	    = "uname", .alias = "newuname", },
876 	{ .name	    = "unlinkat",
877 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
878 	{ .name	    = "utimensat",
879 	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
880 	{ .name	    = "wait4",	    .errpid = true,
881 	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
882 	{ .name	    = "waitid",	    .errpid = true,
883 	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
884 };
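/*
 * Note: syscall_fmts[] has to be kept sorted by ->name, as syscall_fmt__find()
 * below looks entries up with bsearch().
 */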
885 
886 static int syscall_fmt__cmp(const void *name, const void *fmtp)
887 {
888 	const struct syscall_fmt *fmt = fmtp;
889 	return strcmp(name, fmt->name);
890 }
891 
892 static struct syscall_fmt *syscall_fmt__find(const char *name)
893 {
894 	const int nmemb = ARRAY_SIZE(syscall_fmts);
895 	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
896 }
897 
898 static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
899 {
900 	int i, nmemb = ARRAY_SIZE(syscall_fmts);
901 
902 	for (i = 0; i < nmemb; ++i) {
903 		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
904 			return &syscall_fmts[i];
905 	}
906 
907 	return NULL;
908 }
909 
910 /*
911  * is_exit: is this "exit" or "exit_group"?
912  * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
913  * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
914  */
915 struct syscall {
916 	struct tep_event    *tp_format;
917 	int		    nr_args;
918 	int		    args_size;
919 	bool		    is_exit;
920 	bool		    is_open;
921 	struct tep_format_field *args;
922 	const char	    *name;
923 	struct syscall_fmt  *fmt;
924 	struct syscall_arg_fmt *arg_fmt;
925 };
926 
927 struct bpf_map_syscall_entry {
928 	bool	enabled;
929 };
930 
931 /*
932  * We need this 'calculated' boolean because in some cases we really
933  * don't know what the duration of a syscall is, for instance, when we start
934  * a session and some threads are waiting for a syscall to finish, say 'poll',
935  * in which case all we can do is to print "( ? )" for the duration and for the
936  * start timestamp.
937  */
938 static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
939 {
940 	double duration = (double)t / NSEC_PER_MSEC;
941 	size_t printed = fprintf(fp, "(");
942 
943 	if (!calculated)
944 		printed += fprintf(fp, "         ");
945 	else if (duration >= 1.0)
946 		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
947 	else if (duration >= 0.01)
948 		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
949 	else
950 		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
951 	return printed + fprintf(fp, "): ");
952 }
953 
954 /**
955  * filename.ptr: The filename char pointer that will be vfs_getname'd
956  * filename.entry_str_pos: Where to insert the string translated from
957  *                         filename.ptr by the vfs_getname tracepoint/kprobe.
958  * ret_scnprintf: syscall args may set this to a different syscall return
959  *                formatter, for instance, fcntl may return fds, file flags, etc.
960  */
961 struct thread_trace {
962 	u64		  entry_time;
963 	bool		  entry_pending;
964 	unsigned long	  nr_events;
965 	unsigned long	  pfmaj, pfmin;
966 	char		  *entry_str;
967 	double		  runtime_ms;
968 	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
969 	struct {
970 		unsigned long ptr;
971 		short int     entry_str_pos;
972 		bool	      pending_open;
973 		unsigned int  namelen;
974 		char	      *name;
975 	} filename;
976 	struct {
977 		int	  max;
978 		char	  **table;
979 	} paths;
980 
981 	struct intlist *syscall_stats;
982 };
983 
984 static struct thread_trace *thread_trace__new(void)
985 {
986 	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
987 
988 	if (ttrace) {
989 		ttrace->paths.max = -1;
990 		ttrace->syscall_stats = intlist__new(NULL);
991 	}
992 
993 	return ttrace;
994 }
995 
996 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
997 {
998 	struct thread_trace *ttrace;
999 
1000 	if (thread == NULL)
1001 		goto fail;
1002 
1003 	if (thread__priv(thread) == NULL)
1004 		thread__set_priv(thread, thread_trace__new());
1005 
1006 	if (thread__priv(thread) == NULL)
1007 		goto fail;
1008 
1009 	ttrace = thread__priv(thread);
1010 	++ttrace->nr_events;
1011 
1012 	return ttrace;
1013 fail:
1014 	color_fprintf(fp, PERF_COLOR_RED,
1015 		      "WARNING: not enough memory, dropping samples!\n");
1016 	return NULL;
1017 }
1018 
1019 
1020 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1021 				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1022 {
1023 	struct thread_trace *ttrace = thread__priv(arg->thread);
1024 
1025 	ttrace->ret_scnprintf = ret_scnprintf;
1026 }
1027 
1028 #define TRACE_PFMAJ		(1 << 0)
1029 #define TRACE_PFMIN		(1 << 1)
1030 
1031 static const size_t trace__entry_str_size = 2048;
1032 
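/*
 * Grow the per-thread fd -> pathname table as needed and remember the
 * pathname for this fd, so that fd arguments can later be printed as,
 * say, "3</etc/passwd>".
 */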
1033 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1034 {
1035 	struct thread_trace *ttrace = thread__priv(thread);
1036 
1037 	if (fd > ttrace->paths.max) {
1038 		char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
1039 
1040 		if (npath == NULL)
1041 			return -1;
1042 
1043 		if (ttrace->paths.max != -1) {
1044 			memset(npath + ttrace->paths.max + 1, 0,
1045 			       (fd - ttrace->paths.max) * sizeof(char *));
1046 		} else {
1047 			memset(npath, 0, (fd + 1) * sizeof(char *));
1048 		}
1049 
1050 		ttrace->paths.table = npath;
1051 		ttrace->paths.max   = fd;
1052 	}
1053 
1054 	ttrace->paths.table[fd] = strdup(pathname);
1055 
1056 	return ttrace->paths.table[fd] != NULL ? 0 : -1;
1057 }
1058 
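/*
 * Resolve a fd to a pathname by reading the /proc/PID/fd/FD (or
 * /proc/PID/task/TID/fd/FD for non thread group leaders) symlink, used when
 * there is no cached pathname for the fd in the per-thread table.
 */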
1059 static int thread__read_fd_path(struct thread *thread, int fd)
1060 {
1061 	char linkname[PATH_MAX], pathname[PATH_MAX];
1062 	struct stat st;
1063 	int ret;
1064 
1065 	if (thread->pid_ == thread->tid) {
1066 		scnprintf(linkname, sizeof(linkname),
1067 			  "/proc/%d/fd/%d", thread->pid_, fd);
1068 	} else {
1069 		scnprintf(linkname, sizeof(linkname),
1070 			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1071 	}
1072 
1073 	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1074 		return -1;
1075 
1076 	ret = readlink(linkname, pathname, sizeof(pathname));
1077 
1078 	if (ret < 0 || ret > st.st_size)
1079 		return -1;
1080 
1081 	pathname[ret] = '\0';
1082 	return trace__set_fd_pathname(thread, fd, pathname);
1083 }
1084 
1085 static const char *thread__fd_path(struct thread *thread, int fd,
1086 				   struct trace *trace)
1087 {
1088 	struct thread_trace *ttrace = thread__priv(thread);
1089 
1090 	if (ttrace == NULL)
1091 		return NULL;
1092 
1093 	if (fd < 0)
1094 		return NULL;
1095 
1096 	if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
1097 		if (!trace->live)
1098 			return NULL;
1099 		++trace->stats.proc_getname;
1100 		if (thread__read_fd_path(thread, fd))
1101 			return NULL;
1102 	}
1103 
1104 	return ttrace->paths.table[fd];
1105 }
1106 
1107 size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1108 {
1109 	int fd = arg->val;
1110 	size_t printed = scnprintf(bf, size, "%d", fd);
1111 	const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1112 
1113 	if (path)
1114 		printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1115 
1116 	return printed;
1117 }
1118 
1119 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1120 {
1121 	size_t printed = scnprintf(bf, size, "%d", fd);
1122 	struct thread *thread = machine__find_thread(trace->host, pid, pid);
1123 
1124 	if (thread) {
1125 		const char *path = thread__fd_path(thread, fd, trace);
1126 
1127 		if (path)
1128 			printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1129 
1130 		thread__put(thread);
1131 	}
1132 
1133 	return printed;
1134 }
1135 
1136 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1137 					      struct syscall_arg *arg)
1138 {
1139 	int fd = arg->val;
1140 	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1141 	struct thread_trace *ttrace = thread__priv(arg->thread);
1142 
1143 	if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
1144 		zfree(&ttrace->paths.table[fd]);
1145 
1146 	return printed;
1147 }
1148 
1149 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1150 				     unsigned long ptr)
1151 {
1152 	struct thread_trace *ttrace = thread__priv(thread);
1153 
1154 	ttrace->filename.ptr = ptr;
1155 	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1156 }
1157 
1158 static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1159 {
1160 	struct augmented_arg *augmented_arg = arg->augmented.args;
1161 
1162 	return scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1163 }
1164 
1165 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1166 					      struct syscall_arg *arg)
1167 {
1168 	unsigned long ptr = arg->val;
1169 
1170 	if (arg->augmented.args)
1171 		return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1172 
1173 	if (!arg->trace->vfs_getname)
1174 		return scnprintf(bf, size, "%#x", ptr);
1175 
1176 	thread__set_filename_pos(arg->thread, bf, ptr);
1177 	return 0;
1178 }
1179 
1180 static bool trace__filter_duration(struct trace *trace, double t)
1181 {
1182 	return t < (trace->duration_filter * NSEC_PER_MSEC);
1183 }
1184 
1185 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1186 {
1187 	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1188 
1189 	return fprintf(fp, "%10.3f ", ts);
1190 }
1191 
1192 /*
1193  * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
1194  * using ttrace->entry_time for a thread that receives a sys_exit without
1195  * first having received a sys_enter ("poll" issued before tracing session
1196  * starts, a sys_enter lost due to ring buffer overflow).
1197  */
1198 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1199 {
1200 	if (tstamp > 0)
1201 		return __trace__fprintf_tstamp(trace, tstamp, fp);
1202 
1203 	return fprintf(fp, "         ? ");
1204 }
1205 
1206 static bool done = false;
1207 static bool interrupted = false;
1208 
1209 static void sig_handler(int sig)
1210 {
1211 	done = true;
1212 	interrupted = sig == SIGINT;
1213 }
1214 
1215 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1216 {
1217 	size_t printed = 0;
1218 
1219 	if (trace->multiple_threads) {
1220 		if (trace->show_comm)
1221 			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1222 		printed += fprintf(fp, "%d ", thread->tid);
1223 	}
1224 
1225 	return printed;
1226 }
1227 
1228 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1229 					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1230 {
1231 	size_t printed = 0;
1232 
1233 	if (trace->show_tstamp)
1234 		printed = trace__fprintf_tstamp(trace, tstamp, fp);
1235 	if (trace->show_duration)
1236 		printed += fprintf_duration(duration, duration_calculated, fp);
1237 	return printed + trace__fprintf_comm_tid(trace, thread, fp);
1238 }
1239 
1240 static int trace__process_event(struct trace *trace, struct machine *machine,
1241 				union perf_event *event, struct perf_sample *sample)
1242 {
1243 	int ret = 0;
1244 
1245 	switch (event->header.type) {
1246 	case PERF_RECORD_LOST:
1247 		color_fprintf(trace->output, PERF_COLOR_RED,
1248 			      "LOST %" PRIu64 " events!\n", event->lost.lost);
1249 		ret = machine__process_lost_event(machine, event, sample);
1250 		break;
1251 	default:
1252 		ret = machine__process_event(machine, event, sample);
1253 		break;
1254 	}
1255 
1256 	return ret;
1257 }
1258 
1259 static int trace__tool_process(struct perf_tool *tool,
1260 			       union perf_event *event,
1261 			       struct perf_sample *sample,
1262 			       struct machine *machine)
1263 {
1264 	struct trace *trace = container_of(tool, struct trace, tool);
1265 	return trace__process_event(trace, machine, event, sample);
1266 }
1267 
1268 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1269 {
1270 	struct machine *machine = vmachine;
1271 
1272 	if (machine->kptr_restrict_warned)
1273 		return NULL;
1274 
1275 	if (symbol_conf.kptr_restrict) {
1276 		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1277 			   "Check /proc/sys/kernel/kptr_restrict.\n\n"
1278 			   "Kernel samples will not be resolved.\n");
1279 		machine->kptr_restrict_warned = true;
1280 		return NULL;
1281 	}
1282 
1283 	return machine__resolve_kernel_addr(vmachine, addrp, modp);
1284 }
1285 
1286 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1287 {
1288 	int err = symbol__init(NULL);
1289 
1290 	if (err)
1291 		return err;
1292 
1293 	trace->host = machine__new_host();
1294 	if (trace->host == NULL)
1295 		return -ENOMEM;
1296 
1297 	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1298 	if (err < 0)
1299 		goto out;
1300 
1301 	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1302 					    evlist->threads, trace__tool_process, false,
1303 					    1);
1304 out:
1305 	if (err)
1306 		symbol__exit();
1307 
1308 	return err;
1309 }
1310 
1311 static void trace__symbols__exit(struct trace *trace)
1312 {
1313 	machine__exit(trace->host);
1314 	trace->host = NULL;
1315 
1316 	symbol__exit();
1317 }
1318 
1319 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1320 {
1321 	int idx;
1322 
1323 	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
1324 		nr_args = sc->fmt->nr_args;
1325 
1326 	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1327 	if (sc->arg_fmt == NULL)
1328 		return -1;
1329 
1330 	for (idx = 0; idx < nr_args; ++idx) {
1331 		if (sc->fmt)
1332 			sc->arg_fmt[idx] = sc->fmt->arg[idx];
1333 	}
1334 
1335 	sc->nr_args = nr_args;
1336 	return 0;
1337 }
1338 
1339 static int syscall__set_arg_fmts(struct syscall *sc)
1340 {
1341 	struct tep_format_field *field, *last_field = NULL;
1342 	int idx = 0, len;
1343 
1344 	for (field = sc->args; field; field = field->next, ++idx) {
1345 		last_field = field;
1346 
1347 		if (sc->fmt && sc->fmt->arg[idx].scnprintf)
1348 			continue;
1349 
1350 		if (strcmp(field->type, "const char *") == 0 &&
1351 			 (strcmp(field->name, "filename") == 0 ||
1352 			  strcmp(field->name, "path") == 0 ||
1353 			  strcmp(field->name, "pathname") == 0))
1354 			sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
1355 		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
1356 			sc->arg_fmt[idx].scnprintf = SCA_PTR;
1357 		else if (strcmp(field->type, "pid_t") == 0)
1358 			sc->arg_fmt[idx].scnprintf = SCA_PID;
1359 		else if (strcmp(field->type, "umode_t") == 0)
1360 			sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
1361 		else if ((strcmp(field->type, "int") == 0 ||
1362 			  strcmp(field->type, "unsigned int") == 0 ||
1363 			  strcmp(field->type, "long") == 0) &&
1364 			 (len = strlen(field->name)) >= 2 &&
1365 			 strcmp(field->name + len - 2, "fd") == 0) {
1366 			/*
1367 			 * /sys/kernel/tracing/events/syscalls/sys_enter*
1368 			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1369 			 * 65 int
1370 			 * 23 unsigned int
1371 			 * 7 unsigned long
1372 			 */
1373 			sc->arg_fmt[idx].scnprintf = SCA_FD;
1374 		}
1375 	}
1376 
1377 	if (last_field)
1378 		sc->args_size = last_field->offset + last_field->size;
1379 
1380 	return 0;
1381 }
1382 
1383 static int trace__read_syscall_info(struct trace *trace, int id)
1384 {
1385 	char tp_name[128];
1386 	struct syscall *sc;
1387 	const char *name = syscalltbl__name(trace->sctbl, id);
1388 
1389 	if (name == NULL)
1390 		return -1;
1391 
1392 	if (id > trace->syscalls.max) {
1393 		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1394 
1395 		if (nsyscalls == NULL)
1396 			return -1;
1397 
1398 		if (trace->syscalls.max != -1) {
1399 			memset(nsyscalls + trace->syscalls.max + 1, 0,
1400 			       (id - trace->syscalls.max) * sizeof(*sc));
1401 		} else {
1402 			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
1403 		}
1404 
1405 		trace->syscalls.table = nsyscalls;
1406 		trace->syscalls.max   = id;
1407 	}
1408 
1409 	sc = trace->syscalls.table + id;
1410 	sc->name = name;
1411 
1412 	sc->fmt  = syscall_fmt__find(sc->name);
1413 
1414 	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1415 	sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1416 
1417 	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1418 		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1419 		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1420 	}
1421 
1422 	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
1423 		return -1;
1424 
1425 	if (IS_ERR(sc->tp_format))
1426 		return -1;
1427 
1428 	sc->args = sc->tp_format->format.fields;
1429 	/*
1430 	 * The first field, '__syscall_nr' ('nr' on older kernels), carries the
1431 	 * syscall number, which we already have, so it is needless here:
1432 	 * check for it and drop it from the argument list.
1433 	 */
1434 	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1435 		sc->args = sc->args->next;
1436 		--sc->nr_args;
1437 	}
1438 
1439 	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1440 	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1441 
1442 	return syscall__set_arg_fmts(sc);
1443 }
1444 
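/*
 * Turn the event qualifier list (syscall names, possibly with globs such as
 * "epoll*") into an array of syscall ids in trace->ev_qualifier_ids,
 * reporting any name that doesn't resolve to a known syscall.
 */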
1445 static int trace__validate_ev_qualifier(struct trace *trace)
1446 {
1447 	int err = 0, i;
1448 	size_t nr_allocated;
1449 	struct str_node *pos;
1450 
1451 	trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
1452 	trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
1453 						 sizeof(trace->ev_qualifier_ids.entries[0]));
1454 
1455 	if (trace->ev_qualifier_ids.entries == NULL) {
1456 		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1457 		       trace->output);
1458 		err = -EINVAL;
1459 		goto out;
1460 	}
1461 
1462 	nr_allocated = trace->ev_qualifier_ids.nr;
1463 	i = 0;
1464 
1465 	strlist__for_each_entry(pos, trace->ev_qualifier) {
1466 		const char *sc = pos->s;
1467 		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1468 
1469 		if (id < 0) {
1470 			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1471 			if (id >= 0)
1472 				goto matches;
1473 
1474 			if (err == 0) {
1475 				fputs("Error:\tInvalid syscall ", trace->output);
1476 				err = -EINVAL;
1477 			} else {
1478 				fputs(", ", trace->output);
1479 			}
1480 
1481 			fputs(sc, trace->output);
1482 		}
1483 matches:
1484 		trace->ev_qualifier_ids.entries[i++] = id;
1485 		if (match_next == -1)
1486 			continue;
1487 
1488 		while (1) {
1489 			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1490 			if (id < 0)
1491 				break;
1492 			if (nr_allocated == trace->ev_qualifier_ids.nr) {
1493 				void *entries;
1494 
1495 				nr_allocated += 8;
1496 				entries = realloc(trace->ev_qualifier_ids.entries,
1497 						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1498 				if (entries == NULL) {
1499 					err = -ENOMEM;
1500 					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1501 					goto out_free;
1502 				}
1503 				trace->ev_qualifier_ids.entries = entries;
1504 			}
1505 			trace->ev_qualifier_ids.nr++;
1506 			trace->ev_qualifier_ids.entries[i++] = id;
1507 		}
1508 	}
1509 
1510 	if (err < 0) {
1511 		fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
1512 		      "\nHint:\tand: 'man syscalls'\n", trace->output);
1513 out_free:
1514 		zfree(&trace->ev_qualifier_ids.entries);
1515 		trace->ev_qualifier_ids.nr = 0;
1516 	}
1517 out:
1518 	return err;
1519 }
1520 
1521 /*
1522  * args is to be interpreted as a series of longs but we need to handle
1523  * 8-byte unaligned accesses. args points to raw_data within the event
1524  * and raw_data is guaranteed to be 8-byte unaligned because it is
1525  * preceded by raw_size which is a u32. So we need to copy args to a temp
1526  * variable to read it. Most notably this avoids extended load instructions
1527  * on unaligned addresses.
1528  */
1529 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1530 {
1531 	unsigned long val;
1532 	unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1533 
1534 	memcpy(&val, p, sizeof(val));
1535 	return val;
1536 }
1537 
1538 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
1539 				      struct syscall_arg *arg)
1540 {
1541 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
1542 		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
1543 
1544 	return scnprintf(bf, size, "arg%d: ", arg->idx);
1545 }
1546 
1547 /*
1548  * Check if the value is in fact zero, i.e. mask whatever needs masking, such
1549  * as mount 'flags' argument that needs ignoring some magic flag, see comment
1550  * in tools/perf/trace/beauty/mount_flags.c
1551  */
1552 static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
1553 {
1554 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
1555 		return sc->arg_fmt[arg->idx].mask_val(arg, val);
1556 
1557 	return val;
1558 }
1559 
1560 static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
1561 				     struct syscall_arg *arg, unsigned long val)
1562 {
1563 	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
1564 		arg->val = val;
1565 		if (sc->arg_fmt[arg->idx].parm)
1566 			arg->parm = sc->arg_fmt[arg->idx].parm;
1567 		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
1568 	}
1569 	return scnprintf(bf, size, "%ld", val);
1570 }
1571 
1572 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1573 				      unsigned char *args, void *augmented_args, int augmented_args_size,
1574 				      struct trace *trace, struct thread *thread)
1575 {
1576 	size_t printed = 0;
1577 	unsigned long val;
1578 	u8 bit = 1;
1579 	struct syscall_arg arg = {
1580 		.args	= args,
1581 		.augmented = {
1582 			.size = augmented_args_size,
1583 			.args = augmented_args,
1584 		},
1585 		.idx	= 0,
1586 		.mask	= 0,
1587 		.trace  = trace,
1588 		.thread = thread,
1589 		.show_string_prefix = trace->show_string_prefix,
1590 	};
1591 	struct thread_trace *ttrace = thread__priv(thread);
1592 
1593 	/*
1594 	 * Things like fcntl will set this in its 'cmd' formatter to pick the
1595 	 * right formatter for the return value (an fd? file flags?), which is
1596 	 * not needed for syscalls that always return a given type, say an fd.
1597 	 */
1598 	ttrace->ret_scnprintf = NULL;
1599 
1600 	if (sc->args != NULL) {
1601 		struct tep_format_field *field;
1602 
1603 		for (field = sc->args; field;
1604 		     field = field->next, ++arg.idx, bit <<= 1) {
1605 			if (arg.mask & bit)
1606 				continue;
1607 
1608 			val = syscall_arg__val(&arg, arg.idx);
1609 			/*
1610 			 * Some syscall args need some mask, most don't and
1611 			 * return val untouched.
1612 			 */
1613 			val = syscall__mask_val(sc, &arg, val);
1614 
1615 			/*
1616 			 * Suppress this argument if its value is zero and
1617 			 * we don't have a string associated in a strarray
1618 			 * for it.
1619 			 */
1620 			if (val == 0 &&
1621 			    !trace->show_zeros &&
1622 			    !(sc->arg_fmt &&
1623 			      (sc->arg_fmt[arg.idx].show_zero ||
1624 			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
1625 			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
1626 			      sc->arg_fmt[arg.idx].parm))
1627 				continue;
1628 
1629 			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
1630 
1631 			if (trace->show_arg_names)
1632 				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
1633 
1634 			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1635 		}
1636 	} else if (IS_ERR(sc->tp_format)) {
1637 		/*
1638 		 * If we managed to read the tracepoint /format file, then we
1639 		 * may end up not having any args, like with gettid(), so only
1640 		 * print the raw args when we didn't manage to read it.
1641 		 */
1642 		while (arg.idx < sc->nr_args) {
1643 			if (arg.mask & bit)
1644 				goto next_arg;
1645 			val = syscall_arg__val(&arg, arg.idx);
1646 			if (printed)
1647 				printed += scnprintf(bf + printed, size - printed, ", ");
1648 			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
1649 			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
1650 next_arg:
1651 			++arg.idx;
1652 			bit <<= 1;
1653 		}
1654 	}
1655 
1656 	return printed;
1657 }
1658 
1659 typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
1660 				  union perf_event *event,
1661 				  struct perf_sample *sample);
1662 
1663 static struct syscall *trace__syscall_info(struct trace *trace,
1664 					   struct perf_evsel *evsel, int id)
1665 {
1666 
1667 	if (id < 0) {
1668 
1669 		/*
1670 		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1671 		 * before that, leaving at a higher verbosity level till that is
1672 		 * explained. Reproduced with plain ftrace with:
1673 		 *
1674 		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1675 		 * grep "NR -1 " /t/trace_pipe
1676 		 *
1677 		 * After generating some load on the machine.
1678  		 */
1679 		if (verbose > 1) {
1680 			static u64 n;
1681 			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
1682 				id, perf_evsel__name(evsel), ++n);
1683 		}
1684 		return NULL;
1685 	}
1686 
1687 	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
1688 	    trace__read_syscall_info(trace, id))
1689 		goto out_cant_read;
1690 
1691 	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
1692 		goto out_cant_read;
1693 
1694 	return &trace->syscalls.table[id];
1695 
1696 out_cant_read:
1697 	if (verbose > 0) {
1698 		fprintf(trace->output, "Problems reading syscall %d", id);
1699 		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
1700 			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
1701 		fputs(" information\n", trace->output);
1702 	}
1703 	return NULL;
1704 }
1705 
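/*
 * Accumulate per-syscall duration statistics for this thread, keyed by
 * syscall id in ttrace->syscall_stats, later dumped by the --summary code.
 */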
1706 static void thread__update_stats(struct thread_trace *ttrace,
1707 				 int id, struct perf_sample *sample)
1708 {
1709 	struct int_node *inode;
1710 	struct stats *stats;
1711 	u64 duration = 0;
1712 
1713 	inode = intlist__findnew(ttrace->syscall_stats, id);
1714 	if (inode == NULL)
1715 		return;
1716 
1717 	stats = inode->priv;
1718 	if (stats == NULL) {
1719 		stats = malloc(sizeof(struct stats));
1720 		if (stats == NULL)
1721 			return;
1722 		init_stats(stats);
1723 		inode->priv = stats;
1724 	}
1725 
1726 	if (ttrace->entry_time && sample->time > ttrace->entry_time)
1727 		duration = sample->time - ttrace->entry_time;
1728 
1729 	update_stats(stats, duration);
1730 }
1731 
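/*
 * If a syscall entry is still pending for the current thread when some other
 * event shows up, print the pending entry followed by " ..." so that the new
 * event doesn't get mixed into its line; the exit will then be printed as a
 * "continued" line.
 */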
1732 static int trace__printf_interrupted_entry(struct trace *trace)
1733 {
1734 	struct thread_trace *ttrace;
1735 	size_t printed;
1736 
1737 	if (trace->failure_only || trace->current == NULL)
1738 		return 0;
1739 
1740 	ttrace = thread__priv(trace->current);
1741 
1742 	if (!ttrace->entry_pending)
1743 		return 0;
1744 
1745 	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
1746 	printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str);
1747 	ttrace->entry_pending = false;
1748 
1749 	++trace->nr_events_printed;
1750 
1751 	return printed;
1752 }
1753 
1754 static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
1755 				 struct perf_sample *sample, struct thread *thread)
1756 {
1757 	int printed = 0;
1758 
1759 	if (trace->print_sample) {
1760 		double ts = (double)sample->time / NSEC_PER_MSEC;
1761 
1762 		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
1763 				   perf_evsel__name(evsel), ts,
1764 				   thread__comm_str(thread),
1765 				   sample->pid, sample->tid, sample->cpu);
1766 	}
1767 
1768 	return printed;
1769 }
1770 
1771 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
1772 {
1773 	void *augmented_args = NULL;
1774 	/*
1775 	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
1776 	 * and there we get all 6 syscall args plus the tracepoint common
1777 	 * fields (sizeof(long)) and the syscall_nr (another long). So we check
1778 	 * if that is the case and if so don't look after the sc->args_size,
1779 	 * but always after the full raw_syscalls:sys_enter payload, which is
1780 	 * fixed.
1781 	 *
1782 	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
1783 	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
1784 	 * copies only what we need for each syscall, like what happens when we
1785 	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
1786 	 * traffic to just what is needed for each syscall.
1787 	 */
1788 	int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
1789 
1790 	*augmented_args_size = sample->raw_size - args_size;
1791 	if (*augmented_args_size > 0)
1792 		augmented_args = sample->raw_data + args_size;
1793 
1794 	return augmented_args;
1795 }
1796 
1797 static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
1798 			    union perf_event *event __maybe_unused,
1799 			    struct perf_sample *sample)
1800 {
1801 	char *msg;
1802 	void *args;
1803 	int printed = 0;
1804 	struct thread *thread;
1805 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
1806 	int augmented_args_size = 0;
1807 	void *augmented_args = NULL;
1808 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
1809 	struct thread_trace *ttrace;
1810 
1811 	if (sc == NULL)
1812 		return -1;
1813 
1814 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1815 	ttrace = thread__trace(thread, trace->output);
1816 	if (ttrace == NULL)
1817 		goto out_put;
1818 
1819 	trace__fprintf_sample(trace, evsel, sample, thread);
1820 
1821 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
1822 
1823 	if (ttrace->entry_str == NULL) {
1824 		ttrace->entry_str = malloc(trace__entry_str_size);
1825 		if (!ttrace->entry_str)
1826 			goto out_put;
1827 	}
1828 
1829 	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
1830 		trace__printf_interrupted_entry(trace);
1831 	/*
1832 	 * If this is raw_syscalls:sys_enter, then it always comes with all 6 possible
1833 	 * arguments, even if the syscall being handled, say "openat", uses only 4.
1834 	 * That breaks the syscall__augmented_args() check for augmented args, because
1835 	 * we calculate syscall->args_size from each syscalls:sys_enter_NAME tracefs
1836 	 * format file, so when handling, say, the openat syscall we get 6 args from
1837 	 * the raw_syscalls:sys_enter event when we expected just 4 and mistakenly take
1838 	 * the extra 2 u64 args for the augmented filename. So just check here and
1839 	 * avoid using augmented syscalls when the evsel is the raw_syscalls one.
1840 	 */
1841 	if (evsel != trace->syscalls.events.sys_enter)
1842 		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
1843 	ttrace->entry_time = sample->time;
1844 	msg = ttrace->entry_str;
1845 	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
1846 
1847 	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
1848 					   args, augmented_args, augmented_args_size, trace, thread);
1849 
1850 	if (sc->is_exit) {
1851 		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
1852 			int alignment = 0;
1853 
1854 			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
1855 			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
1856 			if (trace->args_alignment > printed)
1857 				alignment = trace->args_alignment - printed;
1858 			fprintf(trace->output, "%*s= ?\n", alignment, " ");
1859 		}
1860 	} else {
1861 		ttrace->entry_pending = true;
1862 		/* See trace__vfs_getname & trace__sys_exit */
1863 		ttrace->filename.pending_open = false;
1864 	}
1865 
1866 	if (trace->current != thread) {
1867 		thread__put(trace->current);
1868 		trace->current = thread__get(thread);
1869 	}
1870 	err = 0;
1871 out_put:
1872 	thread__put(thread);
1873 	return err;
1874 }
1875 
1876 static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evsel,
1877 				    struct perf_sample *sample)
1878 {
1879 	struct thread_trace *ttrace;
1880 	struct thread *thread;
1881 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
1882 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
1883 	char msg[1024];
1884 	void *args, *augmented_args = NULL;
1885 	int augmented_args_size;
1886 
1887 	if (sc == NULL)
1888 		return -1;
1889 
1890 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1891 	ttrace = thread__trace(thread, trace->output);
1892 	/*
1893 	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
1894 	 * and the rest of the beautifiers access it via struct syscall_arg.
1895 	 */
1896 	if (ttrace == NULL)
1897 		goto out_put;
1898 
1899 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
1900 	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
1901 	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
1902 	fprintf(trace->output, "%s", msg);
1903 	err = 0;
1904 out_put:
1905 	thread__put(thread);
1906 	return err;
1907 }
1908 
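/*
 * Resolve the sample's callchain into 'cursor', honouring a per-event
 * attr.sample_max_stack when set, otherwise the global trace->max_stack.
 */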
1909 static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
1910 				    struct perf_sample *sample,
1911 				    struct callchain_cursor *cursor)
1912 {
1913 	struct addr_location al;
1914 	int max_stack = evsel->attr.sample_max_stack ?
1915 			evsel->attr.sample_max_stack :
1916 			trace->max_stack;
1917 	int err;
1918 
1919 	if (machine__resolve(trace->host, &al, sample) < 0)
1920 		return -1;
1921 
1922 	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
1923 	addr_location__put(&al);
1924 	return err;
1925 }
1926 
1927 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
1928 {
1929 	/* TODO: user-configurable print_opts */
1930 	const unsigned int print_opts = EVSEL__PRINT_SYM |
1931 				        EVSEL__PRINT_DSO |
1932 				        EVSEL__PRINT_UNKNOWN_AS_ADDR;
1933 
1934 	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
1935 }
1936 
1937 static const char *errno_to_name(struct perf_evsel *evsel, int err)
1938 {
1939 	struct perf_env *env = perf_evsel__env(evsel);
1940 	const char *arch_name = perf_env__arch(env);
1941 
1942 	return arch_syscalls__strerrno(arch_name, err);
1943 }
1944 
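/*
 * raw_syscalls:sys_exit handler: compute the syscall duration, print the
 * pending entry (or a "continued" marker if it was interrupted) and then
 * beautify the return value: errno name, hex, pid, or a per-syscall
 * formatter set at entry time via ttrace->ret_scnprintf.
 */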
1945 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
1946 			   union perf_event *event __maybe_unused,
1947 			   struct perf_sample *sample)
1948 {
1949 	long ret;
1950 	u64 duration = 0;
1951 	bool duration_calculated = false;
1952 	struct thread *thread;
1953 	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
1954 	int alignment = trace->args_alignment;
1955 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
1956 	struct thread_trace *ttrace;
1957 
1958 	if (sc == NULL)
1959 		return -1;
1960 
1961 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1962 	ttrace = thread__trace(thread, trace->output);
1963 	if (ttrace == NULL)
1964 		goto out_put;
1965 
1966 	trace__fprintf_sample(trace, evsel, sample, thread);
1967 
1968 	if (trace->summary)
1969 		thread__update_stats(ttrace, id, sample);
1970 
1971 	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
1972 
1973 	if (sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
1974 		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
1975 		ttrace->filename.pending_open = false;
1976 		++trace->stats.vfs_getname;
1977 	}
1978 
1979 	if (ttrace->entry_time) {
1980 		duration = sample->time - ttrace->entry_time;
1981 		if (trace__filter_duration(trace, duration))
1982 			goto out;
1983 		duration_calculated = true;
1984 	} else if (trace->duration_filter)
1985 		goto out;
1986 
1987 	if (sample->callchain) {
1988 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1989 		if (callchain_ret == 0) {
1990 			if (callchain_cursor.nr < trace->min_stack)
1991 				goto out;
1992 			callchain_ret = 1;
1993 		}
1994 	}
1995 
1996 	if (trace->summary_only || (ret >= 0 && trace->failure_only))
1997 		goto out;
1998 
1999 	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2000 
2001 	if (ttrace->entry_pending) {
2002 		printed = fprintf(trace->output, "%s", ttrace->entry_str);
2003 	} else {
2004 		fprintf(trace->output, " ... [");
2005 		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2006 		fprintf(trace->output, "]: %s()", sc->name);
2007 	}
2008 
2009 	printed++; /* the closing ')' */
2010 
2011 	if (alignment > printed)
2012 		alignment -= printed;
2013 	else
2014 		alignment = 0;
2015 
2016 	fprintf(trace->output, ")%*s= ", alignment, " ");
2017 
2018 	if (sc->fmt == NULL) {
2019 		if (ret < 0)
2020 			goto errno_print;
2021 signed_print:
2022 		fprintf(trace->output, "%ld", ret);
2023 	} else if (ret < 0) {
2024 errno_print: {
2025 		char bf[STRERR_BUFSIZE];
2026 		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2027 			   *e = errno_to_name(evsel, -ret);
2028 
2029 		fprintf(trace->output, "-1 %s (%s)", e, emsg);
2030 	}
2031 	} else if (ret == 0 && sc->fmt->timeout)
2032 		fprintf(trace->output, "0 (Timeout)");
2033 	else if (ttrace->ret_scnprintf) {
2034 		char bf[1024];
2035 		struct syscall_arg arg = {
2036 			.val	= ret,
2037 			.thread	= thread,
2038 			.trace	= trace,
2039 		};
2040 		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2041 		ttrace->ret_scnprintf = NULL;
2042 		fprintf(trace->output, "%s", bf);
2043 	} else if (sc->fmt->hexret)
2044 		fprintf(trace->output, "%#lx", ret);
2045 	else if (sc->fmt->errpid) {
2046 		struct thread *child = machine__find_thread(trace->host, ret, ret);
2047 
2048 		if (child != NULL) {
2049 			fprintf(trace->output, "%ld", ret);
2050 			if (child->comm_set)
2051 				fprintf(trace->output, " (%s)", thread__comm_str(child));
2052 			thread__put(child);
2053 		}
2054 	} else
2055 		goto signed_print;
2056 
2057 	fputc('\n', trace->output);
2058 
2059 	/*
2060 	 * For the sake of --max-events we only count non-filtered sys_enter +
2061 	 * sys_exit pairs and other tracepoint events as an 'event'.
2062 	 */
2063 	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2064 		interrupted = true;
2065 
2066 	if (callchain_ret > 0)
2067 		trace__fprintf_callchain(trace, sample);
2068 	else if (callchain_ret < 0)
2069 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2070 out:
2071 	ttrace->entry_pending = false;
2072 	err = 0;
2073 out_put:
2074 	thread__put(thread);
2075 	return err;
2076 }
2077 
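/*
 * probe:vfs_getname handler: stash the pathname being looked up so that
 * trace__sys_exit() can associate it with the returned fd, and, if a syscall
 * entry line is still being assembled, splice the pathname into it at the
 * position recorded in ttrace->filename.entry_str_pos.
 */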
2078 static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
2079 			      union perf_event *event __maybe_unused,
2080 			      struct perf_sample *sample)
2081 {
2082 	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2083 	struct thread_trace *ttrace;
2084 	size_t filename_len, entry_str_len, to_move;
2085 	ssize_t remaining_space;
2086 	char *pos;
2087 	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
2088 
2089 	if (!thread)
2090 		goto out;
2091 
2092 	ttrace = thread__priv(thread);
2093 	if (!ttrace)
2094 		goto out_put;
2095 
2096 	filename_len = strlen(filename);
2097 	if (filename_len == 0)
2098 		goto out_put;
2099 
2100 	if (ttrace->filename.namelen < filename_len) {
2101 		char *f = realloc(ttrace->filename.name, filename_len + 1);
2102 
2103 		if (f == NULL)
2104 			goto out_put;
2105 
2106 		ttrace->filename.namelen = filename_len;
2107 		ttrace->filename.name = f;
2108 	}
2109 
2110 	strcpy(ttrace->filename.name, filename);
2111 	ttrace->filename.pending_open = true;
2112 
2113 	if (!ttrace->filename.ptr)
2114 		goto out_put;
2115 
2116 	entry_str_len = strlen(ttrace->entry_str);
2117 	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2118 	if (remaining_space <= 0)
2119 		goto out_put;
2120 
2121 	if (filename_len > (size_t)remaining_space) {
2122 		filename += filename_len - remaining_space;
2123 		filename_len = remaining_space;
2124 	}
2125 
2126 	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2127 	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2128 	memmove(pos + filename_len, pos, to_move);
2129 	memcpy(pos, filename, filename_len);
2130 
2131 	ttrace->filename.ptr = 0;
2132 	ttrace->filename.entry_str_pos = 0;
2133 out_put:
2134 	thread__put(thread);
2135 out:
2136 	return 0;
2137 }
2138 
2139 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
2140 				     union perf_event *event __maybe_unused,
2141 				     struct perf_sample *sample)
2142 {
2143 	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
2144 	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2145 	struct thread *thread = machine__findnew_thread(trace->host,
2146 							sample->pid,
2147 							sample->tid);
2148 	struct thread_trace *ttrace = thread__trace(thread, trace->output);
2149 
2150 	if (ttrace == NULL)
2151 		goto out_dump;
2152 
2153 	ttrace->runtime_ms += runtime_ms;
2154 	trace->runtime_ms += runtime_ms;
2155 out_put:
2156 	thread__put(thread);
2157 	return 0;
2158 
2159 out_dump:
2160 	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
2161 	       evsel->name,
2162 	       perf_evsel__strval(evsel, sample, "comm"),
2163 	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
2164 	       runtime,
2165 	       perf_evsel__intval(evsel, sample, "vruntime"));
2166 	goto out_put;
2167 }
2168 
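/* Print only the character data of a bpf-output payload, one '.' per non-printable byte. */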
2169 static int bpf_output__printer(enum binary_printer_ops op,
2170 			       unsigned int val, void *extra __maybe_unused, FILE *fp)
2171 {
2172 	unsigned char ch = (unsigned char)val;
2173 
2174 	switch (op) {
2175 	case BINARY_PRINT_CHAR_DATA:
2176 		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2177 	case BINARY_PRINT_DATA_BEGIN:
2178 	case BINARY_PRINT_LINE_BEGIN:
2179 	case BINARY_PRINT_ADDR:
2180 	case BINARY_PRINT_NUM_DATA:
2181 	case BINARY_PRINT_NUM_PAD:
2182 	case BINARY_PRINT_SEP:
2183 	case BINARY_PRINT_CHAR_PAD:
2184 	case BINARY_PRINT_LINE_END:
2185 	case BINARY_PRINT_DATA_END:
2186 	default:
2187 		break;
2188 	}
2189 
2190 	return 0;
2191 }
2192 
2193 static void bpf_output__fprintf(struct trace *trace,
2194 				struct perf_sample *sample)
2195 {
2196 	binary__fprintf(sample->raw_data, sample->raw_size, 8,
2197 			bpf_output__printer, NULL, trace->output);
2198 	++trace->nr_events_printed;
2199 }
2200 
2201 static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
2202 				union perf_event *event __maybe_unused,
2203 				struct perf_sample *sample)
2204 {
2205 	struct thread *thread;
2206 	int callchain_ret = 0;
2207 	/*
2208 	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
2209 	 * this event's max_events having been hit and this is an entry coming
2210 	 * from the ring buffer that we should discard, since the max events
2211 	 * have already been considered/printed.
2212 	 */
2213 	if (evsel->disabled)
2214 		return 0;
2215 
2216 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2217 
2218 	if (sample->callchain) {
2219 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2220 		if (callchain_ret == 0) {
2221 			if (callchain_cursor.nr < trace->min_stack)
2222 				goto out;
2223 			callchain_ret = 1;
2224 		}
2225 	}
2226 
2227 	trace__printf_interrupted_entry(trace);
2228 	trace__fprintf_tstamp(trace, sample->time, trace->output);
2229 
2230 	if (trace->trace_syscalls && trace->show_duration)
2231 		fprintf(trace->output, "(         ): ");
2232 
2233 	if (thread)
2234 		trace__fprintf_comm_tid(trace, thread, trace->output);
2235 
2236 	if (evsel == trace->syscalls.events.augmented) {
2237 		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
2238 		struct syscall *sc = trace__syscall_info(trace, evsel, id);
2239 
2240 		if (sc) {
2241 			fprintf(trace->output, "%s(", sc->name);
2242 			trace__fprintf_sys_enter(trace, evsel, sample);
2243 			fputc(')', trace->output);
2244 			goto newline;
2245 		}
2246 
2247 		/*
2248 		 * XXX: Not having the associated syscall info or not finding/adding
2249 		 * 	the thread should never happen, but if it does...
2250 		 * 	fall thru and print it as a bpf_output event.
2251 		 */
2252 	}
2253 
2254 	fprintf(trace->output, "%s:", evsel->name);
2255 
2256 	if (perf_evsel__is_bpf_output(evsel)) {
2257 		bpf_output__fprintf(trace, sample);
2258 	} else if (evsel->tp_format) {
2259 		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2260 		    trace__fprintf_sys_enter(trace, evsel, sample)) {
2261 			event_format__fprintf(evsel->tp_format, sample->cpu,
2262 					      sample->raw_data, sample->raw_size,
2263 					      trace->output);
2264 			++trace->nr_events_printed;
2265 
2266 			if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2267 				perf_evsel__disable(evsel);
2268 				perf_evsel__close(evsel);
2269 			}
2270 		}
2271 	}
2272 
2273 newline:
2274 	fprintf(trace->output, "\n");
2275 
2276 	if (callchain_ret > 0)
2277 		trace__fprintf_callchain(trace, sample);
2278 	else if (callchain_ret < 0)
2279 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2280 out:
2281 	thread__put(thread);
2282 	return 0;
2283 }
2284 
2285 static void print_location(FILE *f, struct perf_sample *sample,
2286 			   struct addr_location *al,
2287 			   bool print_dso, bool print_sym)
2288 {
2289 
2290 	if ((verbose > 0 || print_dso) && al->map)
2291 		fprintf(f, "%s@", al->map->dso->long_name);
2292 
2293 	if ((verbose > 0 || print_sym) && al->sym)
2294 		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2295 			al->addr - al->sym->start);
2296 	else if (al->map)
2297 		fprintf(f, "0x%" PRIx64, al->addr);
2298 	else
2299 		fprintf(f, "0x%" PRIx64, sample->addr);
2300 }
2301 
2302 static int trace__pgfault(struct trace *trace,
2303 			  struct perf_evsel *evsel,
2304 			  union perf_event *event __maybe_unused,
2305 			  struct perf_sample *sample)
2306 {
2307 	struct thread *thread;
2308 	struct addr_location al;
2309 	char map_type = 'd';
2310 	struct thread_trace *ttrace;
2311 	int err = -1;
2312 	int callchain_ret = 0;
2313 
2314 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2315 
2316 	if (sample->callchain) {
2317 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2318 		if (callchain_ret == 0) {
2319 			if (callchain_cursor.nr < trace->min_stack)
2320 				goto out_put;
2321 			callchain_ret = 1;
2322 		}
2323 	}
2324 
2325 	ttrace = thread__trace(thread, trace->output);
2326 	if (ttrace == NULL)
2327 		goto out_put;
2328 
2329 	if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2330 		ttrace->pfmaj++;
2331 	else
2332 		ttrace->pfmin++;
2333 
2334 	if (trace->summary_only)
2335 		goto out;
2336 
2337 	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2338 
2339 	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2340 
2341 	fprintf(trace->output, "%sfault [",
2342 		evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2343 		"maj" : "min");
2344 
2345 	print_location(trace->output, sample, &al, false, true);
2346 
2347 	fprintf(trace->output, "] => ");
2348 
2349 	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2350 
2351 	if (!al.map) {
2352 		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2353 
2354 		if (al.map)
2355 			map_type = 'x';
2356 		else
2357 			map_type = '?';
2358 	}
2359 
2360 	print_location(trace->output, sample, &al, true, false);
2361 
2362 	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2363 
2364 	if (callchain_ret > 0)
2365 		trace__fprintf_callchain(trace, sample);
2366 	else if (callchain_ret < 0)
2367 		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
2368 
2369 	++trace->nr_events_printed;
2370 out:
2371 	err = 0;
2372 out_put:
2373 	thread__put(thread);
2374 	return err;
2375 }
2376 
2377 static void trace__set_base_time(struct trace *trace,
2378 				 struct perf_evsel *evsel,
2379 				 struct perf_sample *sample)
2380 {
2381 	/*
2382 	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2383 	 * and don't use sample->time unconditionally, we may end up having
2384 	 * some other event in the future without PERF_SAMPLE_TIME for good
2385 	 * reason, i.e. we may not be interested in its timestamps, just in
2386 	 * it taking place, picking some piece of information when it
2387 	 * appears in our event stream (vfs_getname comes to mind).
2388 	 */
2389 	if (trace->base_time == 0 && !trace->full_time &&
2390 	    (evsel->attr.sample_type & PERF_SAMPLE_TIME))
2391 		trace->base_time = sample->time;
2392 }
2393 
2394 static int trace__process_sample(struct perf_tool *tool,
2395 				 union perf_event *event,
2396 				 struct perf_sample *sample,
2397 				 struct perf_evsel *evsel,
2398 				 struct machine *machine __maybe_unused)
2399 {
2400 	struct trace *trace = container_of(tool, struct trace, tool);
2401 	struct thread *thread;
2402 	int err = 0;
2403 
2404 	tracepoint_handler handler = evsel->handler;
2405 
2406 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2407 	if (thread && thread__is_filtered(thread))
2408 		goto out;
2409 
2410 	trace__set_base_time(trace, evsel, sample);
2411 
2412 	if (handler) {
2413 		++trace->nr_events;
2414 		handler(trace, evsel, event, sample);
2415 	}
2416 out:
2417 	thread__put(thread);
2418 	return err;
2419 }
2420 
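/*
 * Build an argv for 'perf record', roughly:
 *
 *   perf record -R -m 1024 -c 1 \
 *	-e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
 *	[-e major-faults] [-e minor-faults] <workload argv>
 *
 * falling back to the older syscalls:sys_{enter,exit} tracepoints when
 * raw_syscalls isn't available (e.g. RHEL6 era kernels).
 */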
2421 static int trace__record(struct trace *trace, int argc, const char **argv)
2422 {
2423 	unsigned int rec_argc, i, j;
2424 	const char **rec_argv;
2425 	const char * const record_args[] = {
2426 		"record",
2427 		"-R",
2428 		"-m", "1024",
2429 		"-c", "1",
2430 	};
2431 
2432 	const char * const sc_args[] = { "-e", };
2433 	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2434 	const char * const majpf_args[] = { "-e", "major-faults" };
2435 	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2436 	const char * const minpf_args[] = { "-e", "minor-faults" };
2437 	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2438 
2439 	/* +1 is for the event string below */
2440 	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2441 		majpf_args_nr + minpf_args_nr + argc;
2442 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2443 
2444 	if (rec_argv == NULL)
2445 		return -ENOMEM;
2446 
2447 	j = 0;
2448 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2449 		rec_argv[j++] = record_args[i];
2450 
2451 	if (trace->trace_syscalls) {
2452 		for (i = 0; i < sc_args_nr; i++)
2453 			rec_argv[j++] = sc_args[i];
2454 
2455 		/* event string may be different for older kernels - e.g., RHEL6 */
2456 		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2457 			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2458 		else if (is_valid_tracepoint("syscalls:sys_enter"))
2459 			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2460 		else {
2461 			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2462 			free(rec_argv);
2463 			return -1;
2464 		}
2465 	}
2466 
2467 	if (trace->trace_pgfaults & TRACE_PFMAJ)
2468 		for (i = 0; i < majpf_args_nr; i++)
2469 			rec_argv[j++] = majpf_args[i];
2470 
2471 	if (trace->trace_pgfaults & TRACE_PFMIN)
2472 		for (i = 0; i < minpf_args_nr; i++)
2473 			rec_argv[j++] = minpf_args[i];
2474 
2475 	for (i = 0; i < (unsigned int)argc; i++)
2476 		rec_argv[j++] = argv[i];
2477 
2478 	return cmd_record(j, rec_argv);
2479 }
2480 
2481 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2482 
2483 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2484 {
2485 	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
2486 
2487 	if (IS_ERR(evsel))
2488 		return false;
2489 
2490 	if (perf_evsel__field(evsel, "pathname") == NULL) {
2491 		perf_evsel__delete(evsel);
2492 		return false;
2493 	}
2494 
2495 	evsel->handler = trace__vfs_getname;
2496 	perf_evlist__add(evlist, evsel);
2497 	return true;
2498 }
2499 
2500 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
2501 {
2502 	struct perf_evsel *evsel;
2503 	struct perf_event_attr attr = {
2504 		.type = PERF_TYPE_SOFTWARE,
2505 		.mmap_data = 1,
2506 	};
2507 
2508 	attr.config = config;
2509 	attr.sample_period = 1;
2510 
2511 	event_attr_init(&attr);
2512 
2513 	evsel = perf_evsel__new(&attr);
2514 	if (evsel)
2515 		evsel->handler = trace__pgfault;
2516 
2517 	return evsel;
2518 }
2519 
2520 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2521 {
2522 	const u32 type = event->header.type;
2523 	struct perf_evsel *evsel;
2524 
2525 	if (type != PERF_RECORD_SAMPLE) {
2526 		trace__process_event(trace, trace->host, event, sample);
2527 		return;
2528 	}
2529 
2530 	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2531 	if (evsel == NULL) {
2532 		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2533 		return;
2534 	}
2535 
2536 	trace__set_base_time(trace, evsel, sample);
2537 
2538 	if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
2539 	    sample->raw_data == NULL) {
2540 		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2541 		       perf_evsel__name(evsel), sample->tid,
2542 		       sample->cpu, sample->raw_size);
2543 	} else {
2544 		tracepoint_handler handler = evsel->handler;
2545 		handler(trace, evsel, event, sample);
2546 	}
2547 
2548 	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
2549 		interrupted = true;
2550 }
2551 
2552 static int trace__add_syscall_newtp(struct trace *trace)
2553 {
2554 	int ret = -1;
2555 	struct perf_evlist *evlist = trace->evlist;
2556 	struct perf_evsel *sys_enter, *sys_exit;
2557 
2558 	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
2559 	if (sys_enter == NULL)
2560 		goto out;
2561 
2562 	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2563 		goto out_delete_sys_enter;
2564 
2565 	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
2566 	if (sys_exit == NULL)
2567 		goto out_delete_sys_enter;
2568 
2569 	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2570 		goto out_delete_sys_exit;
2571 
2572 	perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
2573 	perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
2574 
2575 	perf_evlist__add(evlist, sys_enter);
2576 	perf_evlist__add(evlist, sys_exit);
2577 
2578 	if (callchain_param.enabled && !trace->kernel_syscallchains) {
2579 		/*
2580 		 * We're interested only in the user space callchain
2581 		 * leading to the syscall, allow overriding that for
2582 		 * debugging reasons using --kernel_syscall_callchains
2583 		 */
2584 		sys_exit->attr.exclude_callchain_kernel = 1;
2585 	}
2586 
2587 	trace->syscalls.events.sys_enter = sys_enter;
2588 	trace->syscalls.events.sys_exit  = sys_exit;
2589 
2590 	ret = 0;
2591 out:
2592 	return ret;
2593 
2594 out_delete_sys_exit:
2595 	perf_evsel__delete_priv(sys_exit);
2596 out_delete_sys_enter:
2597 	perf_evsel__delete_priv(sys_enter);
2598 	goto out;
2599 }
2600 
2601 static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
2602 {
2603 	int err = -1;
2604 	struct perf_evsel *sys_exit;
2605 	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2606 						trace->ev_qualifier_ids.nr,
2607 						trace->ev_qualifier_ids.entries);
2608 
2609 	if (filter == NULL)
2610 		goto out_enomem;
2611 
2612 	if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2613 					  filter)) {
2614 		sys_exit = trace->syscalls.events.sys_exit;
2615 		err = perf_evsel__append_tp_filter(sys_exit, filter);
2616 	}
2617 
2618 	free(filter);
2619 out:
2620 	return err;
2621 out_enomem:
2622 	errno = ENOMEM;
2623 	goto out;
2624 }
2625 
2626 #ifdef HAVE_LIBBPF_SUPPORT
2627 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
2628 {
2629 	int fd = bpf_map__fd(trace->syscalls.map);
2630 	struct bpf_map_syscall_entry value = {
2631 		.enabled = !trace->not_ev_qualifier,
2632 	};
2633 	int err = 0;
2634 	size_t i;
2635 
2636 	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
2637 		int key = trace->ev_qualifier_ids.entries[i];
2638 
2639 		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
2640 		if (err)
2641 			break;
2642 	}
2643 
2644 	return err;
2645 }
2646 
2647 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
2648 {
2649 	int fd = bpf_map__fd(trace->syscalls.map);
2650 	struct bpf_map_syscall_entry value = {
2651 		.enabled = enabled,
2652 	};
2653 	int err = 0, key;
2654 
2655 	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
2656 		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
2657 		if (err)
2658 			break;
2659 	}
2660 
2661 	return err;
2662 }
2663 
2664 static int trace__init_syscalls_bpf_map(struct trace *trace)
2665 {
2666 	bool enabled = true;
2667 
2668 	if (trace->ev_qualifier_ids.nr)
2669 		enabled = trace->not_ev_qualifier;
2670 
2671 	return __trace__init_syscalls_bpf_map(trace, enabled);
2672 }
2673 #else
2674 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
2675 {
2676 	return 0;
2677 }
2678 
2679 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
2680 {
2681 	return 0;
2682 }
2683 #endif // HAVE_LIBBPF_SUPPORT
2684 
2685 static int trace__set_ev_qualifier_filter(struct trace *trace)
2686 {
2687 	if (trace->syscalls.map)
2688 		return trace__set_ev_qualifier_bpf_filter(trace);
2689 	return trace__set_ev_qualifier_tp_filter(trace);
2690 }
2691 
2692 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
2693 				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
2694 {
2695 	int err = 0;
2696 #ifdef HAVE_LIBBPF_SUPPORT
2697 	bool value = true;
2698 	int map_fd = bpf_map__fd(map);
2699 	size_t i;
2700 
2701 	for (i = 0; i < npids; ++i) {
2702 		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
2703 		if (err)
2704 			break;
2705 	}
2706 #endif
2707 	return err;
2708 }
2709 
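/*
 * Filter out events from the tracer itself and, when running over ssh, from
 * the sshd ancestor that is relaying our output, to avoid a feedback loop
 * where printing the trace generates ever more events to trace.
 */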
2710 static int trace__set_filter_loop_pids(struct trace *trace)
2711 {
2712 	unsigned int nr = 1, err;
2713 	pid_t pids[32] = {
2714 		getpid(),
2715 	};
2716 	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
2717 
2718 	while (thread && nr < ARRAY_SIZE(pids)) {
2719 		struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
2720 
2721 		if (parent == NULL)
2722 			break;
2723 
2724 		if (!strcmp(thread__comm_str(parent), "sshd")) {
2725 			pids[nr++] = parent->tid;
2726 			break;
2727 		}
2728 		thread = parent;
2729 	}
2730 
2731 	err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
2732 	if (!err && trace->filter_pids.map)
2733 		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
2734 
2735 	return err;
2736 }
2737 
2738 static int trace__set_filter_pids(struct trace *trace)
2739 {
2740 	int err = 0;
2741 	/*
2742 	 * Better not use !target__has_task() here because we need to cover the
2743 	 * case where no threads were specified in the command line, but a
2744 	 * workload was, and in that case we will fill in the thread_map when
2745 	 * we fork the workload in perf_evlist__prepare_workload.
2746 	 */
2747 	if (trace->filter_pids.nr > 0) {
2748 		err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
2749 						      trace->filter_pids.entries);
2750 		if (!err && trace->filter_pids.map) {
2751 			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
2752 						       trace->filter_pids.entries);
2753 		}
2754 	} else if (thread_map__pid(trace->evlist->threads, 0) == -1) {
2755 		err = trace__set_filter_loop_pids(trace);
2756 	}
2757 
2758 	return err;
2759 }
2760 
2761 static int __trace__deliver_event(struct trace *trace, union perf_event *event)
2762 {
2763 	struct perf_evlist *evlist = trace->evlist;
2764 	struct perf_sample sample;
2765 	int err;
2766 
2767 	err = perf_evlist__parse_sample(evlist, event, &sample);
2768 	if (err)
2769 		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
2770 	else
2771 		trace__handle_event(trace, event, &sample);
2772 
2773 	return 0;
2774 }
2775 
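/*
 * With event sorting enabled, flush ordered events that are at least one
 * second older than the most recent timestamp seen, keeping the output
 * roughly time ordered without buffering forever.
 */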
2776 static int __trace__flush_events(struct trace *trace)
2777 {
2778 	u64 first = ordered_events__first_time(&trace->oe.data);
2779 	u64 flush = trace->oe.last - NSEC_PER_SEC;
2780 
2781 	/* Is there something to flush? */
2782 	if (first && first < flush)
2783 		return ordered_events__flush_time(&trace->oe.data, flush);
2784 
2785 	return 0;
2786 }
2787 
2788 static int trace__flush_events(struct trace *trace)
2789 {
2790 	return !trace->sort_events ? 0 : __trace__flush_events(trace);
2791 }
2792 
2793 static int trace__deliver_event(struct trace *trace, union perf_event *event)
2794 {
2795 	int err;
2796 
2797 	if (!trace->sort_events)
2798 		return __trace__deliver_event(trace, event);
2799 
2800 	err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
2801 	if (err && err != -1)
2802 		return err;
2803 
2804 	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
2805 	if (err)
2806 		return err;
2807 
2808 	return trace__flush_events(trace);
2809 }
2810 
2811 static int ordered_events__deliver_event(struct ordered_events *oe,
2812 					 struct ordered_event *event)
2813 {
2814 	struct trace *trace = container_of(oe, struct trace, oe.data);
2815 
2816 	return __trace__deliver_event(trace, event->event);
2817 }
2818 
2819 static int trace__run(struct trace *trace, int argc, const char **argv)
2820 {
2821 	struct perf_evlist *evlist = trace->evlist;
2822 	struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
2823 	int err = -1, i;
2824 	unsigned long before;
2825 	const bool forks = argc > 0;
2826 	bool draining = false;
2827 
2828 	trace->live = true;
2829 
2830 	if (!trace->raw_augmented_syscalls) {
2831 		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
2832 			goto out_error_raw_syscalls;
2833 
2834 		if (trace->trace_syscalls)
2835 			trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
2836 	}
2837 
2838 	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
2839 		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
2840 		if (pgfault_maj == NULL)
2841 			goto out_error_mem;
2842 		perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
2843 		perf_evlist__add(evlist, pgfault_maj);
2844 	}
2845 
2846 	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
2847 		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
2848 		if (pgfault_min == NULL)
2849 			goto out_error_mem;
2850 		perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
2851 		perf_evlist__add(evlist, pgfault_min);
2852 	}
2853 
2854 	if (trace->sched &&
2855 	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
2856 				   trace__sched_stat_runtime))
2857 		goto out_error_sched_stat_runtime;
2858 
2859 	/*
2860 	 * If a global cgroup was set, apply it to all the events without an
2861 	 * explicit cgroup. I.e.:
2862 	 *
2863 	 * 	trace -G A -e sched:*switch
2864 	 *
2865 	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
2866 	 * _and_ sched:sched_switch to the 'A' cgroup, while:
2867 	 *
2868 	 * trace -e sched:*switch -G A
2869 	 *
2870 	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
2871 	 * other events (raw_syscalls:sys_{enter,exit}, etc) are left "without"
2872 	 * a cgroup (on the root cgroup, sys wide, etc).
2873 	 *
2874 	 * Multiple cgroups:
2875 	 *
2876 	 * trace -G A -e sched:*switch -G B
2877 	 *
2878 	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
2879 	 * to the 'B' cgroup.
2880 	 *
2881 	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
2882 	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
2883 	 */
2884 	if (trace->cgroup)
2885 		evlist__set_default_cgroup(trace->evlist, trace->cgroup);
2886 
2887 	err = perf_evlist__create_maps(evlist, &trace->opts.target);
2888 	if (err < 0) {
2889 		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
2890 		goto out_delete_evlist;
2891 	}
2892 
2893 	err = trace__symbols_init(trace, evlist);
2894 	if (err < 0) {
2895 		fprintf(trace->output, "Problems initializing symbol libraries!\n");
2896 		goto out_delete_evlist;
2897 	}
2898 
2899 	perf_evlist__config(evlist, &trace->opts, &callchain_param);
2900 
2901 	signal(SIGCHLD, sig_handler);
2902 	signal(SIGINT, sig_handler);
2903 
2904 	if (forks) {
2905 		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
2906 						    argv, false, NULL);
2907 		if (err < 0) {
2908 			fprintf(trace->output, "Couldn't run the workload!\n");
2909 			goto out_delete_evlist;
2910 		}
2911 	}
2912 
2913 	err = perf_evlist__open(evlist);
2914 	if (err < 0)
2915 		goto out_error_open;
2916 
2917 	err = bpf__apply_obj_config();
2918 	if (err) {
2919 		char errbuf[BUFSIZ];
2920 
2921 		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
2922 		pr_err("ERROR: Apply config to BPF failed: %s\n",
2923 			 errbuf);
2924 		goto out_error_open;
2925 	}
2926 
2927 	err = trace__set_filter_pids(trace);
2928 	if (err < 0)
2929 		goto out_error_mem;
2930 
2931 	if (trace->syscalls.map)
2932 		trace__init_syscalls_bpf_map(trace);
2933 
2934 	if (trace->ev_qualifier_ids.nr > 0) {
2935 		err = trace__set_ev_qualifier_filter(trace);
2936 		if (err < 0)
2937 			goto out_errno;
2938 
2939 		if (trace->syscalls.events.sys_exit) {
2940 			pr_debug("event qualifier tracepoint filter: %s\n",
2941 				 trace->syscalls.events.sys_exit->filter);
2942 		}
2943 	}
2944 
2945 	err = perf_evlist__apply_filters(evlist, &evsel);
2946 	if (err < 0)
2947 		goto out_error_apply_filters;
2948 
2949 	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
2950 	if (err < 0)
2951 		goto out_error_mmap;
2952 
2953 	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
2954 		perf_evlist__enable(evlist);
2955 
2956 	if (forks)
2957 		perf_evlist__start_workload(evlist);
2958 
2959 	if (trace->opts.initial_delay) {
2960 		usleep(trace->opts.initial_delay * 1000);
2961 		perf_evlist__enable(evlist);
2962 	}
2963 
2964 	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
2965 				  evlist->threads->nr > 1 ||
2966 				  perf_evlist__first(evlist)->attr.inherit;
2967 
2968 	/*
2969 	 * Now that we already used evsel->attr to ask the kernel to setup the
2970 	 * events, lets reuse evsel->attr.sample_max_stack as the limit in
2971 	 * trace__resolve_callchain(), allowing per-event max-stack settings
2972 	 * to override an explicitly set --max-stack global setting.
2973 	 */
2974 	evlist__for_each_entry(evlist, evsel) {
2975 		if (evsel__has_callchain(evsel) &&
2976 		    evsel->attr.sample_max_stack == 0)
2977 			evsel->attr.sample_max_stack = trace->max_stack;
2978 	}
2979 again:
2980 	before = trace->nr_events;
2981 
2982 	for (i = 0; i < evlist->nr_mmaps; i++) {
2983 		union perf_event *event;
2984 		struct perf_mmap *md;
2985 
2986 		md = &evlist->mmap[i];
2987 		if (perf_mmap__read_init(md) < 0)
2988 			continue;
2989 
2990 		while ((event = perf_mmap__read_event(md)) != NULL) {
2991 			++trace->nr_events;
2992 
2993 			err = trace__deliver_event(trace, event);
2994 			if (err)
2995 				goto out_disable;
2996 
2997 			perf_mmap__consume(md);
2998 
2999 			if (interrupted)
3000 				goto out_disable;
3001 
3002 			if (done && !draining) {
3003 				perf_evlist__disable(evlist);
3004 				draining = true;
3005 			}
3006 		}
3007 		perf_mmap__read_done(md);
3008 	}
3009 
3010 	if (trace->nr_events == before) {
3011 		int timeout = done ? 100 : -1;
3012 
3013 		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
3014 			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
3015 				draining = true;
3016 
3017 			goto again;
3018 		} else {
3019 			if (trace__flush_events(trace))
3020 				goto out_disable;
3021 		}
3022 	} else {
3023 		goto again;
3024 	}
3025 
3026 out_disable:
3027 	thread__zput(trace->current);
3028 
3029 	perf_evlist__disable(evlist);
3030 
3031 	if (trace->sort_events)
3032 		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
3033 
3034 	if (!err) {
3035 		if (trace->summary)
3036 			trace__fprintf_thread_summary(trace, trace->output);
3037 
3038 		if (trace->show_tool_stats) {
3039 			fprintf(trace->output, "Stats:\n "
3040 					       " vfs_getname : %" PRIu64 "\n"
3041 					       " proc_getname: %" PRIu64 "\n",
3042 				trace->stats.vfs_getname,
3043 				trace->stats.proc_getname);
3044 		}
3045 	}
3046 
3047 out_delete_evlist:
3048 	trace__symbols__exit(trace);
3049 
3050 	perf_evlist__delete(evlist);
3051 	cgroup__put(trace->cgroup);
3052 	trace->evlist = NULL;
3053 	trace->live = false;
3054 	return err;
3055 {
3056 	char errbuf[BUFSIZ];
3057 
3058 out_error_sched_stat_runtime:
3059 	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
3060 	goto out_error;
3061 
3062 out_error_raw_syscalls:
3063 	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
3064 	goto out_error;
3065 
3066 out_error_mmap:
3067 	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
3068 	goto out_error;
3069 
3070 out_error_open:
3071 	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
3072 
3073 out_error:
3074 	fprintf(trace->output, "%s\n", errbuf);
3075 	goto out_delete_evlist;
3076 
3077 out_error_apply_filters:
3078 	fprintf(trace->output,
3079 		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
3080 		evsel->filter, perf_evsel__name(evsel), errno,
3081 		str_error_r(errno, errbuf, sizeof(errbuf)));
3082 	goto out_delete_evlist;
3083 }
3084 out_error_mem:
3085 	fprintf(trace->output, "Not enough memory to run!\n");
3086 	goto out_delete_evlist;
3087 
3088 out_errno:
3089 	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
3090 	goto out_delete_evlist;
3091 }
3092 
3093 static int trace__replay(struct trace *trace)
3094 {
3095 	const struct perf_evsel_str_handler handlers[] = {
3096 		{ "probe:vfs_getname",	     trace__vfs_getname, },
3097 	};
3098 	struct perf_data data = {
3099 		.file      = {
3100 			.path = input_name,
3101 		},
3102 		.mode      = PERF_DATA_MODE_READ,
3103 		.force     = trace->force,
3104 	};
3105 	struct perf_session *session;
3106 	struct perf_evsel *evsel;
3107 	int err = -1;
3108 
3109 	trace->tool.sample	  = trace__process_sample;
3110 	trace->tool.mmap	  = perf_event__process_mmap;
3111 	trace->tool.mmap2	  = perf_event__process_mmap2;
3112 	trace->tool.comm	  = perf_event__process_comm;
3113 	trace->tool.exit	  = perf_event__process_exit;
3114 	trace->tool.fork	  = perf_event__process_fork;
3115 	trace->tool.attr	  = perf_event__process_attr;
3116 	trace->tool.tracing_data  = perf_event__process_tracing_data;
3117 	trace->tool.build_id	  = perf_event__process_build_id;
3118 	trace->tool.namespaces	  = perf_event__process_namespaces;
3119 
3120 	trace->tool.ordered_events = true;
3121 	trace->tool.ordering_requires_timestamps = true;
3122 
3123 	/* add tid to output */
3124 	trace->multiple_threads = true;
3125 
3126 	session = perf_session__new(&data, false, &trace->tool);
3127 	if (session == NULL)
3128 		return -1;
3129 
3130 	if (trace->opts.target.pid)
3131 		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
3132 
3133 	if (trace->opts.target.tid)
3134 		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
3135 
3136 	if (symbol__init(&session->header.env) < 0)
3137 		goto out;
3138 
3139 	trace->host = &session->machines.host;
3140 
3141 	err = perf_session__set_tracepoints_handlers(session, handlers);
3142 	if (err)
3143 		goto out;
3144 
3145 	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3146 						     "raw_syscalls:sys_enter");
3147 	/* older kernels have syscalls tp versus raw_syscalls */
3148 	if (evsel == NULL)
3149 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3150 							     "syscalls:sys_enter");
3151 
3152 	if (evsel &&
3153 	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
3154 	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
3155 		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
3156 		goto out;
3157 	}
3158 
3159 	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3160 						     "raw_syscalls:sys_exit");
3161 	if (evsel == NULL)
3162 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
3163 							     "syscalls:sys_exit");
3164 	if (evsel &&
3165 	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
3166 	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
3167 		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
3168 		goto out;
3169 	}
3170 
3171 	evlist__for_each_entry(session->evlist, evsel) {
3172 		if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
3173 		    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
3174 		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
3175 		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
3176 			evsel->handler = trace__pgfault;
3177 	}
3178 
3179 	setup_pager();
3180 
3181 	err = perf_session__process_events(session);
3182 	if (err)
3183 		pr_err("Failed to process events, error %d", err);
3184 
3185 	else if (trace->summary)
3186 		trace__fprintf_thread_summary(trace, trace->output);
3187 
3188 out:
3189 	perf_session__delete(session);
3190 
3191 	return err;
3192 }
3193 
3194 static size_t trace__fprintf_threads_header(FILE *fp)
3195 {
3196 	size_t printed;
3197 
3198 	printed  = fprintf(fp, "\n Summary of events:\n\n");
3199 
3200 	return printed;
3201 }
3202 
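/*
 * Re-sort the per-thread syscall stats rbtree by total time spent (msecs)
 * for the --summary table.
 */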
3203 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
3204 	struct stats 	*stats;
3205 	double		msecs;
3206 	int		syscall;
3207 )
3208 {
3209 	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
3210 	struct stats *stats = source->priv;
3211 
3212 	entry->syscall = source->i;
3213 	entry->stats   = stats;
3214 	entry->msecs   = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
3215 }
3216 
3217 static size_t thread__dump_stats(struct thread_trace *ttrace,
3218 				 struct trace *trace, FILE *fp)
3219 {
3220 	size_t printed = 0;
3221 	struct syscall *sc;
3222 	struct rb_node *nd;
3223 	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
3224 
3225 	if (syscall_stats == NULL)
3226 		return 0;
3227 
3228 	printed += fprintf(fp, "\n");
3229 
3230 	printed += fprintf(fp, "   syscall            calls    total       min       avg       max      stddev\n");
3231 	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
3232 	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");
3233 
3234 	resort_rb__for_each_entry(nd, syscall_stats) {
3235 		struct stats *stats = syscall_stats_entry->stats;
3236 		if (stats) {
3237 			double min = (double)(stats->min) / NSEC_PER_MSEC;
3238 			double max = (double)(stats->max) / NSEC_PER_MSEC;
3239 			double avg = avg_stats(stats);
3240 			double pct;
3241 			u64 n = (u64) stats->n;
3242 
3243 			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
3244 			avg /= NSEC_PER_MSEC;
3245 
3246 			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
3247 			printed += fprintf(fp, "   %-15s", sc->name);
3248 			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
3249 					   n, syscall_stats_entry->msecs, min, avg);
3250 			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
3251 		}
3252 	}
3253 
3254 	resort_rb__delete(syscall_stats);
3255 	printed += fprintf(fp, "\n\n");
3256 
3257 	return printed;
3258 }
3259 
3260 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
3261 {
3262 	size_t printed = 0;
3263 	struct thread_trace *ttrace = thread__priv(thread);
3264 	double ratio;
3265 
3266 	if (ttrace == NULL)
3267 		return 0;
3268 
3269 	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
3270 
3271 	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
3272 	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
3273 	printed += fprintf(fp, "%.1f%%", ratio);
3274 	if (ttrace->pfmaj)
3275 		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
3276 	if (ttrace->pfmin)
3277 		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
3278 	if (trace->sched)
3279 		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
3280 	else if (fputc('\n', fp) != EOF)
3281 		++printed;
3282 
3283 	printed += thread__dump_stats(ttrace, trace, fp);
3284 
3285 	return printed;
3286 }
3287 
3288 static unsigned long thread__nr_events(struct thread_trace *ttrace)
3289 {
3290 	return ttrace ? ttrace->nr_events : 0;
3291 }
3292 
3293 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
3294 	struct thread *thread;
3295 )
3296 {
3297 	entry->thread = rb_entry(nd, struct thread, rb_node);
3298 }
3299 
3300 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
3301 {
3302 	size_t printed = trace__fprintf_threads_header(fp);
3303 	struct rb_node *nd;
3304 	int i;
3305 
3306 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3307 		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
3308 
3309 		if (threads == NULL) {
3310 			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
3311 			return 0;
3312 		}
3313 
3314 		resort_rb__for_each_entry(nd, threads)
3315 			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
3316 
3317 		resort_rb__delete(threads);
3318 	}
3319 	return printed;
3320 }
3321 
3322 static int trace__set_duration(const struct option *opt, const char *str,
3323 			       int unset __maybe_unused)
3324 {
3325 	struct trace *trace = opt->value;
3326 
3327 	trace->duration_filter = atof(str);
3328 	return 0;
3329 }
3330 
3331 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
3332 					      int unset __maybe_unused)
3333 {
3334 	int ret = -1;
3335 	size_t i;
3336 	struct trace *trace = opt->value;
3337 	/*
3338 	 * FIXME: introduce a intarray class, plain parse csv and create a
3339 	 * { int nr, int entries[] } struct...
3340 	 */
3341 	struct intlist *list = intlist__new(str);
3342 
3343 	if (list == NULL)
3344 		return -1;
3345 
3346 	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
3347 	trace->filter_pids.entries = calloc(i, sizeof(pid_t));
3348 
3349 	if (trace->filter_pids.entries == NULL)
3350 		goto out;
3351 
3352 	trace->filter_pids.entries[0] = getpid();
3353 
3354 	for (i = 1; i < trace->filter_pids.nr; ++i)
3355 		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
3356 
3357 	intlist__delete(list);
3358 	ret = 0;
3359 out:
3360 	return ret;
3361 }
3362 
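/* If the output file already exists and is non-empty, rotate it to <name>.old before truncating. */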
3363 static int trace__open_output(struct trace *trace, const char *filename)
3364 {
3365 	struct stat st;
3366 
3367 	if (!stat(filename, &st) && st.st_size) {
3368 		char oldname[PATH_MAX];
3369 
3370 		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
3371 		unlink(oldname);
3372 		rename(filename, oldname);
3373 	}
3374 
3375 	trace->output = fopen(filename, "w");
3376 
3377 	return trace->output == NULL ? -errno : 0;
3378 }
3379 
3380 static int parse_pagefaults(const struct option *opt, const char *str,
3381 			    int unset __maybe_unused)
3382 {
3383 	int *trace_pgfaults = opt->value;
3384 
3385 	if (strcmp(str, "all") == 0)
3386 		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
3387 	else if (strcmp(str, "maj") == 0)
3388 		*trace_pgfaults |= TRACE_PFMAJ;
3389 	else if (strcmp(str, "min") == 0)
3390 		*trace_pgfaults |= TRACE_PFMIN;
3391 	else
3392 		return -1;
3393 
3394 	return 0;
3395 }
3396 
3397 static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
3398 {
3399 	struct perf_evsel *evsel;
3400 
3401 	evlist__for_each_entry(evlist, evsel)
3402 		evsel->handler = handler;
3403 }
3404 
3405 static int evlist__set_syscall_tp_fields(struct perf_evlist *evlist)
3406 {
3407 	struct perf_evsel *evsel;
3408 
3409 	evlist__for_each_entry(evlist, evsel) {
3410 		if (evsel->priv || !evsel->tp_format)
3411 			continue;
3412 
3413 		if (strcmp(evsel->tp_format->system, "syscalls"))
3414 			continue;
3415 
3416 		if (perf_evsel__init_syscall_tp(evsel))
3417 			return -1;
3418 
3419 		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
3420 			struct syscall_tp *sc = evsel->priv;
3421 
3422 			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
3423 				return -1;
3424 		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
3425 			struct syscall_tp *sc = evsel->priv;
3426 
3427 			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
3428 				return -1;
3429 		}
3430 	}
3431 
3432 	return 0;
3433 }
3434 
3435 /*
3436  * XXX: Hackish, just splitting the combined -e/--event list (syscalls
3437  * (raw_syscalls:{sys_enter,sys_exit}) + events (tracepoints, HW, SW, etc.)) to
3438  * use the existing facilities unchanged (trace->ev_qualifier + parse_options()).
3439  *
3440  * It'd be better to introduce a parse_options() variant that would return a
3441  * list with the terms it didn't match to an event...
3442  */
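/*
 * Illustrative example (editorial, not from the original source): with
 * "perf trace -e open*,sched:sched_switch" the loop below puts "open*" into
 * lists[1] (it glob-matches syscall names, so it becomes part of the
 * ev_qualifier) and "sched:sched_switch" into lists[0], which is then handed
 * back to parse_events_option() as a regular event list.
 */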
3443 static int trace__parse_events_option(const struct option *opt, const char *str,
3444 				      int unset __maybe_unused)
3445 {
3446 	struct trace *trace = (struct trace *)opt->value;
3447 	const char *s = str;
3448 	char *sep = NULL, *lists[2] = { NULL, NULL, };
3449 	int len = strlen(str) + 1, err = -1, list, idx;
3450 	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
3451 	char group_name[PATH_MAX];
3452 	struct syscall_fmt *fmt;
3453 
3454 	if (strace_groups_dir == NULL)
3455 		return -1;
3456 
3457 	if (*s == '!') {
3458 		++s;
3459 		trace->not_ev_qualifier = true;
3460 	}
3461 
3462 	while (1) {
3463 		if ((sep = strchr(s, ',')) != NULL)
3464 			*sep = '\0';
3465 
3466 		list = 0;
3467 		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
3468 		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
3469 			list = 1;
3470 			goto do_concat;
3471 		}
3472 
3473 		fmt = syscall_fmt__find_by_alias(s);
3474 		if (fmt != NULL) {
3475 			list = 1;
3476 			s = fmt->name;
3477 		} else {
3478 			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
3479 			if (access(group_name, R_OK) == 0)
3480 				list = 1;
3481 		}
3482 do_concat:
3483 		if (lists[list]) {
3484 			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
3485 		} else {
3486 			lists[list] = malloc(len);
3487 			if (lists[list] == NULL)
3488 				goto out;
3489 			strcpy(lists[list], s);
3490 		}
3491 
3492 		if (!sep)
3493 			break;
3494 
3495 		*sep = ',';
3496 		s = sep + 1;
3497 	}
3498 
3499 	if (lists[1] != NULL) {
3500 		struct strlist_config slist_config = {
3501 			.dirname = strace_groups_dir,
3502 		};
3503 
3504 		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
3505 		if (trace->ev_qualifier == NULL) {
3506 			fputs("Not enough memory to parse event qualifier\n", trace->output);
3507 			goto out;
3508 		}
3509 
3510 		if (trace__validate_ev_qualifier(trace))
3511 			goto out;
3512 		trace->trace_syscalls = true;
3513 	}
3514 
3515 	err = 0;
3516 
3517 	if (lists[0]) {
3518 		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
3519 					       "event selector. use 'perf list' to list available events",
3520 					       parse_events_option);
3521 		err = parse_events_option(&o, lists[0], 0);
3522 	}
3523 out:
3524 	if (sep)
3525 		*sep = ',';
3526 
3527 	return err;
3528 }
3529 
3530 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
3531 {
3532 	struct trace *trace = opt->value;
3533 
3534 	if (!list_empty(&trace->evlist->entries))
3535 		return parse_cgroups(opt, str, unset);
3536 
3537 	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
3538 
3539 	return 0;
3540 }
3541 
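/*
 * Descriptive note (editorial): walk all currently loaded BPF objects and
 * return the first map whose name matches, or NULL if none is found.
 */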
3542 static struct bpf_map *bpf__find_map_by_name(const char *name)
3543 {
3544 	struct bpf_object *obj, *tmp;
3545 
3546 	bpf_object__for_each_safe(obj, tmp) {
3547 		struct bpf_map *map = bpf_object__find_map_by_name(obj, name);
3548 		if (map)
3549 			return map;
3550 
3551 	}
3552 
3553 	return NULL;
3554 }
3555 
3556 static void trace__set_bpf_map_filtered_pids(struct trace *trace)
3557 {
3558 	trace->filter_pids.map = bpf__find_map_by_name("pids_filtered");
3559 }
3560 
3561 static void trace__set_bpf_map_syscalls(struct trace *trace)
3562 {
3563 	trace->syscalls.map = bpf__find_map_by_name("syscalls");
3564 }
3565 
3566 static int trace__config(const char *var, const char *value, void *arg)
3567 {
3568 	struct trace *trace = arg;
3569 	int err = 0;
3570 
3571 	if (!strcmp(var, "trace.add_events")) {
3572 		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
3573 					       "event selector. use 'perf list' to list available events",
3574 					       parse_events_option);
3575 		err = parse_events_option(&o, value, 0);
3576 	} else if (!strcmp(var, "trace.show_timestamp")) {
3577 		trace->show_tstamp = perf_config_bool(var, value);
3578 	} else if (!strcmp(var, "trace.show_duration")) {
3579 		trace->show_duration = perf_config_bool(var, value);
3580 	} else if (!strcmp(var, "trace.show_arg_names")) {
3581 		trace->show_arg_names = perf_config_bool(var, value);
3582 		if (!trace->show_arg_names)
3583 			trace->show_zeros = true;
3584 	} else if (!strcmp(var, "trace.show_zeros")) {
3585 		bool new_show_zeros = perf_config_bool(var, value);
3586 		if (!trace->show_arg_names && !new_show_zeros) {
3587 			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
3588 			goto out;
3589 		}
3590 		trace->show_zeros = new_show_zeros;
3591 	} else if (!strcmp(var, "trace.show_prefix")) {
3592 		trace->show_string_prefix = perf_config_bool(var, value);
3593 	} else if (!strcmp(var, "trace.no_inherit")) {
3594 		trace->opts.no_inherit = perf_config_bool(var, value);
3595 	} else if (!strcmp(var, "trace.args_alignment")) {
3596 		int args_alignment = 0;
3597 		if (perf_config_int(&args_alignment, var, value) == 0)
3598 			trace->args_alignment = args_alignment;
3599 	}
3600 out:
3601 	return err;
3602 }
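/*
 * Illustrative ~/.perfconfig snippet (editorial; assumes the usual
 * gitconfig-style syntax handled by perf_config()) exercising the knobs
 * recognized above:
 *
 *	[trace]
 *		add_events = sched:sched_switch
 *		show_timestamp = no
 *		args_alignment = 40
 */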
3603 
3604 int cmd_trace(int argc, const char **argv)
3605 {
3606 	const char *trace_usage[] = {
3607 		"perf trace [<options>] [<command>]",
3608 		"perf trace [<options>] -- <command> [<options>]",
3609 		"perf trace record [<options>] [<command>]",
3610 		"perf trace record [<options>] -- <command> [<options>]",
3611 		NULL
3612 	};
3613 	struct trace trace = {
3614 		.syscalls = {
3615 			. max = -1,
3616 		},
3617 		.opts = {
3618 			.target = {
3619 				.uid	   = UINT_MAX,
3620 				.uses_mmap = true,
3621 			},
3622 			.user_freq     = UINT_MAX,
3623 			.user_interval = ULLONG_MAX,
3624 			.no_buffering  = true,
3625 			.mmap_pages    = UINT_MAX,
3626 		},
3627 		.output = stderr,
3628 		.show_comm = true,
3629 		.show_tstamp = true,
3630 		.show_duration = true,
3631 		.show_arg_names = true,
3632 		.args_alignment = 70,
3633 		.trace_syscalls = false,
3634 		.kernel_syscallchains = false,
3635 		.max_stack = UINT_MAX,
3636 		.max_events = ULONG_MAX,
3637 	};
3638 	const char *output_name = NULL;
3639 	const struct option trace_options[] = {
3640 	OPT_CALLBACK('e', "event", &trace, "event",
3641 		     "event/syscall selector. use 'perf list' to list available events",
3642 		     trace__parse_events_option),
3643 	OPT_BOOLEAN(0, "comm", &trace.show_comm,
3644 		    "show the thread COMM next to its id"),
3645 	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
3646 	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
3647 		     trace__parse_events_option),
3648 	OPT_STRING('o', "output", &output_name, "file", "output file name"),
3649 	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
3650 	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
3651 		    "trace events on existing process id"),
3652 	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
3653 		    "trace events on existing thread id"),
3654 	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
3655 		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
3656 	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
3657 		    "system-wide collection from all CPUs"),
3658 	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
3659 		    "list of cpus to monitor"),
3660 	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
3661 		    "child tasks do not inherit counters"),
3662 	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
3663 		     "number of mmap data pages",
3664 		     perf_evlist__parse_mmap_pages),
3665 	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
3666 		   "user to profile"),
3667 	OPT_CALLBACK(0, "duration", &trace, "float",
3668 		     "show only events with duration > N.M ms",
3669 		     trace__set_duration),
3670 	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
3671 	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
3672 	OPT_BOOLEAN('T', "time", &trace.full_time,
3673 		    "Show full timestamp, not time relative to first start"),
3674 	OPT_BOOLEAN(0, "failure", &trace.failure_only,
3675 		    "Show only syscalls that failed"),
3676 	OPT_BOOLEAN('s', "summary", &trace.summary_only,
3677 		    "Show only syscall summary with statistics"),
3678 	OPT_BOOLEAN('S', "with-summary", &trace.summary,
3679 		    "Show all syscalls and summary with statistics"),
3680 	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
3681 		     "Trace pagefaults", parse_pagefaults, "maj"),
3682 	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
3683 	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
3684 	OPT_CALLBACK(0, "call-graph", &trace.opts,
3685 		     "record_mode[,record_size]", record_callchain_help,
3686 		     &record_parse_callchain_opt),
3687 	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
3688 		    "Show the kernel callchains on the syscall exit path"),
3689 	OPT_ULONG(0, "max-events", &trace.max_events,
3690 		     "Set the maximum number of events to print; exit after that is reached"),
3691 	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
3692 		     "Set the minimum stack depth when parsing the callchain, "
3693 		     "anything below the specified depth will be ignored."),
3694 	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
3695 		     "Set the maximum stack depth when parsing the callchain, "
3696 		     "anything beyond the specified depth will be ignored. "
3697 		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
3698 	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
3699 			"Sort batch of events before processing; use if getting out-of-order events"),
3700 	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
3701 			"print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
3702 	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3703 			"per thread proc mmap processing timeout in ms"),
3704 	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
3705 		     trace__parse_cgroups),
3706 	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
3707 		     "ms to wait before starting measurement after program "
3708 		     "start"),
3709 	OPT_END()
3710 	};
3711 	bool __maybe_unused max_stack_user_set = true;
3712 	bool mmap_pages_user_set = true;
3713 	struct perf_evsel *evsel;
3714 	const char * const trace_subcommands[] = { "record", NULL };
3715 	int err = -1;
3716 	char bf[BUFSIZ];
3717 
3718 	signal(SIGSEGV, sighandler_dump_stack);
3719 	signal(SIGFPE, sighandler_dump_stack);
3720 
3721 	trace.evlist = perf_evlist__new();
3722 	trace.sctbl = syscalltbl__new();
3723 
3724 	if (trace.evlist == NULL || trace.sctbl == NULL) {
3725 		pr_err("Not enough memory to run!\n");
3726 		err = -ENOMEM;
3727 		goto out;
3728 	}
3729 
3730 	err = perf_config(trace__config, &trace);
3731 	if (err)
3732 		goto out;
3733 
3734 	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
3735 				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3736 
3737 	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
3738 		usage_with_options_msg(trace_usage, trace_options,
3739 				       "cgroup monitoring only available in system-wide mode");
3740 	}
3741 
3742 	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
3743 	if (IS_ERR(evsel)) {
3744 		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
3745 		pr_err("ERROR: Setup of trace syscalls enter failed: %s\n", bf);
3746 		goto out;
3747 	}
3748 
3749 	if (evsel) {
3750 		trace.syscalls.events.augmented = evsel;
3751 		trace__set_bpf_map_filtered_pids(&trace);
3752 		trace__set_bpf_map_syscalls(&trace);
3753 	}
3754 
3755 	err = bpf__setup_stdout(trace.evlist);
3756 	if (err) {
3757 		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
3758 		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
3759 		goto out;
3760 	}
3761 
3762 	err = -1;
3763 
3764 	if (trace.trace_pgfaults) {
3765 		trace.opts.sample_address = true;
3766 		trace.opts.sample_time = true;
3767 	}
3768 
3769 	if (trace.opts.mmap_pages == UINT_MAX)
3770 		mmap_pages_user_set = false;
3771 
3772 	if (trace.max_stack == UINT_MAX) {
3773 		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
3774 		max_stack_user_set = false;
3775 	}
3776 
3777 #ifdef HAVE_DWARF_UNWIND_SUPPORT
3778 	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
3779 		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
3780 	}
3781 #endif
3782 
3783 	if (callchain_param.enabled) {
3784 		if (!mmap_pages_user_set && geteuid() == 0)
3785 			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
3786 
3787 		symbol_conf.use_callchain = true;
3788 	}
3789 
3790 	if (trace.evlist->nr_entries > 0) {
3791 		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
3792 		if (evlist__set_syscall_tp_fields(trace.evlist)) {
3793 			perror("failed to set syscalls:* tracepoint fields");
3794 			goto out;
3795 		}
3796 	}
3797 
3798 	if (trace.sort_events) {
3799 		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
3800 		ordered_events__set_copy_on_queue(&trace.oe.data, true);
3801 	}
3802 
3803 	/*
3804 	 * If we are augmenting syscalls, then combine what we put in the
3805 	 * __augmented_syscalls__ BPF map with what is in the
3806 	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
3807 	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
3808 	 *
3809 	 * We'll switch to looking at two BPF maps, one for sys_enter and the
3810 	 * other for sys_exit when we start augmenting the sys_exit paths with
3811 	 * buffers that are being copied from kernel to userspace, think 'read'
3812 	 * syscall.
3813 	 */
3814 	if (trace.syscalls.events.augmented) {
3815 		evsel = trace.syscalls.events.augmented;
3816 
3817 		if (perf_evsel__init_augmented_syscall_tp(evsel) ||
3818 		    perf_evsel__init_augmented_syscall_tp_args(evsel))
3819 			goto out;
3820 		evsel->handler = trace__sys_enter;
3821 
3822 		evlist__for_each_entry(trace.evlist, evsel) {
3823 			bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
3824 
3825 			if (raw_syscalls_sys_exit) {
3826 				trace.raw_augmented_syscalls = true;
3827 				goto init_augmented_syscall_tp;
3828 			}
3829 
3830 			if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
3831 init_augmented_syscall_tp:
3832 				perf_evsel__init_augmented_syscall_tp(evsel);
3833 				perf_evsel__init_augmented_syscall_tp_ret(evsel);
3834 				evsel->handler = trace__sys_exit;
3835 			}
3836 		}
3837 	}
3838 
3839 	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
3840 		return trace__record(&trace, argc-1, &argv[1]);
3841 
3842 	/* summary_only implies summary option, but don't overwrite summary if set */
3843 	if (trace.summary_only)
3844 		trace.summary = trace.summary_only;
3845 
3846 	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
3847 	    trace.evlist->nr_entries == 0 /* Was -e/--event used? */) {
3848 		trace.trace_syscalls = true;
3849 	}
3850 
3851 	if (output_name != NULL) {
3852 		err = trace__open_output(&trace, output_name);
3853 		if (err < 0) {
3854 			perror("failed to create output file");
3855 			goto out;
3856 		}
3857 	}
3858 
3859 	err = target__validate(&trace.opts.target);
3860 	if (err) {
3861 		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
3862 		fprintf(trace.output, "%s", bf);
3863 		goto out_close;
3864 	}
3865 
3866 	err = target__parse_uid(&trace.opts.target);
3867 	if (err) {
3868 		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
3869 		fprintf(trace.output, "%s", bf);
3870 		goto out_close;
3871 	}
3872 
3873 	if (!argc && target__none(&trace.opts.target))
3874 		trace.opts.target.system_wide = true;
3875 
3876 	if (input_name)
3877 		err = trace__replay(&trace);
3878 	else
3879 		err = trace__run(&trace, argc, argv);
3880 
3881 out_close:
3882 	if (output_name != NULL)
3883 		fclose(trace.output);
3884 out:
3885 	return err;
3886 }
3887