xref: /linux/tools/perf/builtin-trace.c (revision c24ff998fc420891f17d73acab6766823d492175)
1 #include <traceevent/event-parse.h>
2 #include "builtin.h"
3 #include "util/color.h"
4 #include "util/evlist.h"
5 #include "util/machine.h"
6 #include "util/thread.h"
7 #include "util/parse-options.h"
8 #include "util/strlist.h"
9 #include "util/thread_map.h"
10 
11 #include <libaudit.h>
12 #include <stdlib.h>
13 
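/*
 * Per-syscall formatting hints: whether a negative return value should be
 * decoded as an errno ("errmsg"), whether a zero return means a timeout
 * ("timeout"), and an optional alias used when the tracepoint name does
 * not match the syscall name (e.g. "fstat" -> "newfstat").
 *
 * syscall_fmt__find() looks this table up with bsearch(), so the entries
 * must be kept sorted by ->name.
 */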
14 static struct syscall_fmt {
15 	const char *name;
16 	const char *alias;
17 	bool	   errmsg;
18 	bool	   timeout;
19 } syscall_fmts[] = {
20 	{ .name	    = "access",	    .errmsg = true, },
21 	{ .name	    = "arch_prctl", .errmsg = true, .alias = "prctl", },
22 	{ .name	    = "connect",    .errmsg = true, },
23 	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat", },
24 	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat", },
25 	{ .name	    = "futex",	    .errmsg = true, },
26 	{ .name	    = "open",	    .errmsg = true, },
27 	{ .name	    = "poll",	    .errmsg = true, .timeout = true, },
28 	{ .name	    = "ppoll",	    .errmsg = true, .timeout = true, },
29 	{ .name	    = "read",	    .errmsg = true, },
30 	{ .name	    = "recvfrom",   .errmsg = true, },
31 	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
32 	{ .name	    = "socket",	    .errmsg = true, },
33 	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat", },
34 };
35 
36 static int syscall_fmt__cmp(const void *name, const void *fmtp)
37 {
38 	const struct syscall_fmt *fmt = fmtp;
39 	return strcmp(name, fmt->name);
40 }
41 
42 static struct syscall_fmt *syscall_fmt__find(const char *name)
43 {
44 	const int nmemb = ARRAY_SIZE(syscall_fmts);
45 	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
46 }
47 
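/*
 * State kept per syscall id: the tracepoint format used to pretty-print
 * the arguments, the name resolved via libaudit, whether the -e/--expr
 * qualifier filtered this syscall out, and the formatting hints above.
 * Entries are filled in lazily by trace__read_syscall_info().
 */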
48 struct syscall {
49 	struct event_format *tp_format;
50 	const char	    *name;
51 	bool		    filtered;
52 	struct syscall_fmt  *fmt;
53 };
54 
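/*
 * Print a syscall duration (nanoseconds) in milliseconds, color coded by
 * magnitude: red for >= 1 ms, yellow for >= 0.01 ms, default otherwise.
 */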
55 static size_t fprintf_duration(unsigned long t, FILE *fp)
56 {
57 	double duration = (double)t / NSEC_PER_MSEC;
58 	size_t printed = fprintf(fp, "(");
59 
60 	if (duration >= 1.0)
61 		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
62 	else if (duration >= 0.01)
63 		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
64 	else
65 		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
66 	return printed + fprintf(fp, "): ");
67 }
68 
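/*
 * Per-thread state, hung off thread->priv: timestamps of the current
 * syscall entry/exit, a preformatted entry string that is flushed at exit
 * time, an event counter and the accumulated sched_stat_runtime in ms.
 * thread__trace() allocates it on first use and prints a warning when the
 * allocation fails.
 */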
69 struct thread_trace {
70 	u64		  entry_time;
71 	u64		  exit_time;
72 	bool		  entry_pending;
73 	unsigned long	  nr_events;
74 	char		  *entry_str;
75 	double		  runtime_ms;
76 };
77 
78 static struct thread_trace *thread_trace__new(void)
79 {
80 	return zalloc(sizeof(struct thread_trace));
81 }
82 
83 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
84 {
85 	struct thread_trace *ttrace;
86 
87 	if (thread == NULL)
88 		goto fail;
89 
90 	if (thread->priv == NULL)
91 		thread->priv = thread_trace__new();
92 
93 	if (thread->priv == NULL)
94 		goto fail;
95 
96 	ttrace = thread->priv;
97 	++ttrace->nr_events;
98 
99 	return ttrace;
100 fail:
101 	color_fprintf(fp, PERF_COLOR_RED,
102 		      "WARNING: not enough memory, dropping samples!\n");
103 	return NULL;
104 }
105 
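/*
 * Global tool state: the lazily grown syscall table indexed by syscall id,
 * the record options, a host machine used for thread/symbol resolution,
 * the output stream, the optional -e event qualifier and the --duration
 * filter (in ms).
 */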
106 struct trace {
107 	struct perf_tool	tool;
108 	int			audit_machine;
109 	struct {
110 		int		max;
111 		struct syscall  *table;
112 	} syscalls;
113 	struct perf_record_opts opts;
114 	struct machine		host;
115 	u64			base_time;
116 	FILE			*output;
117 	struct strlist		*ev_qualifier;
118 	unsigned long		nr_events;
119 	bool			sched;
120 	bool			multiple_threads;
121 	double			duration_filter;
122 	double			runtime_ms;
123 };
124 
125 static bool trace__filter_duration(struct trace *trace, double t)
126 {
127 	return t < (trace->duration_filter * NSEC_PER_MSEC);
128 }
129 
130 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
131 {
132 	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
133 
134 	return fprintf(fp, "%10.3f ", ts);
135 }
136 
137 static bool done = false;
138 
139 static void sig_handler(int sig __maybe_unused)
140 {
141 	done = true;
142 }
143 
144 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
145 					u64 duration, u64 tstamp, FILE *fp)
146 {
147 	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
148 	printed += fprintf_duration(duration, fp);
149 
150 	if (trace->multiple_threads)
151 		printed += fprintf(fp, "%d ", thread->tid);
152 
153 	return printed;
154 }
155 
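/*
 * Handle non-sample events seen while tracing: PERF_RECORD_LOST gets a red
 * warning with the number of lost events and then, like every other type,
 * is handed to the generic machine__process_event() (note that the
 * fall-through overwrites the return value of machine__process_lost_event()).
 */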
156 static int trace__process_event(struct trace *trace, struct machine *machine,
157 				union perf_event *event)
158 {
159 	int ret = 0;
160 
161 	switch (event->header.type) {
162 	case PERF_RECORD_LOST:
163 		color_fprintf(trace->output, PERF_COLOR_RED,
164 			      "LOST %" PRIu64 " events!\n", event->lost.lost);
165 		ret = machine__process_lost_event(machine, event);
166 	default:
167 		ret = machine__process_event(machine, event);
168 		break;
169 	}
170 
171 	return ret;
172 }
173 
174 static int trace__tool_process(struct perf_tool *tool,
175 			       union perf_event *event,
176 			       struct perf_sample *sample __maybe_unused,
177 			       struct machine *machine)
178 {
179 	struct trace *trace = container_of(tool, struct trace, tool);
180 	return trace__process_event(trace, machine, event);
181 }
182 
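/*
 * Set up symbol handling and the host machine, then synthesize events for
 * the already existing threads (just the target task's threads, or all of
 * them) so that the tids seen in samples can be resolved.
 */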
183 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
184 {
185 	int err = symbol__init();
186 
187 	if (err)
188 		return err;
189 
190 	machine__init(&trace->host, "", HOST_KERNEL_ID);
191 	machine__create_kernel_maps(&trace->host);
192 
193 	if (perf_target__has_task(&trace->opts.target)) {
194 		err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
195 							trace__tool_process,
196 							&trace->host);
197 	} else {
198 		err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
199 						     &trace->host);
200 	}
201 
202 	if (err)
203 		symbol__exit();
204 
205 	return err;
206 }
207 
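/*
 * Fill in trace->syscalls.table[id]: resolve the syscall name with
 * libaudit, grow (and zero) the table when this id is beyond the current
 * maximum, honour the -e qualifier, and read the matching
 * "syscalls:sys_enter_<name>" tracepoint format, falling back to the alias
 * from syscall_fmts[] when the obvious name does not exist.
 */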
208 static int trace__read_syscall_info(struct trace *trace, int id)
209 {
210 	char tp_name[128];
211 	struct syscall *sc;
212 	const char *name = audit_syscall_to_name(id, trace->audit_machine);
213 
214 	if (name == NULL)
215 		return -1;
216 
217 	if (id > trace->syscalls.max) {
218 		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
219 
220 		if (nsyscalls == NULL)
221 			return -1;
222 
223 		if (trace->syscalls.max != -1) {
224 			memset(nsyscalls + trace->syscalls.max + 1, 0,
225 			       (id - trace->syscalls.max) * sizeof(*sc));
226 		} else {
227 			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
228 		}
229 
230 		trace->syscalls.table = nsyscalls;
231 		trace->syscalls.max   = id;
232 	}
233 
234 	sc = trace->syscalls.table + id;
235 	sc->name = name;
236 
237 	if (trace->ev_qualifier && !strlist__find(trace->ev_qualifier, name)) {
238 		sc->filtered = true;
239 		/*
240 		 * No need to read tracepoint information since this will be
241 		 * filtered out.
242 		 */
240 		 * No need to read tracepoint information since this will be
243 		return 0;
244 	}
245 
246 	sc->fmt  = syscall_fmt__find(sc->name);
247 
248 	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
249 	sc->tp_format = event_format__new("syscalls", tp_name);
250 
251 	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
252 		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
253 		sc->tp_format = event_format__new("syscalls", tp_name);
254 	}
255 
256 	return sc->tp_format != NULL ? 0 : -1;
257 }
258 
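/*
 * Format up to six syscall arguments into 'bf'.  When the tracepoint
 * format could be read, its field names are used (the first field is
 * skipped, it appears to carry the syscall number rather than an
 * argument); otherwise generic "argN: <value>" pairs are printed.
 */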
259 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
260 				      unsigned long *args)
261 {
262 	int i = 0;
263 	size_t printed = 0;
264 
265 	if (sc->tp_format != NULL) {
266 		struct format_field *field;
267 
268 		for (field = sc->tp_format->format.fields->next; field; field = field->next) {
269 			printed += scnprintf(bf + printed, size - printed,
270 					     "%s%s: %ld", printed ? ", " : "",
271 					     field->name, args[i++]);
272 		}
273 	} else {
274 		while (i < 6) {
275 			printed += scnprintf(bf + printed, size - printed,
276 					     "%sarg%d: %ld",
277 					     printed ? ", " : "", i, args[i]);
278 			++i;
279 		}
280 	}
281 
282 	return printed;
283 }
284 
285 typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
286 				  struct perf_sample *sample);
287 
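/*
 * Map the "id" field of a raw_syscalls sample to its struct syscall,
 * reading the syscall info on first use and complaining on the output
 * stream when that is not possible.
 */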
288 static struct syscall *trace__syscall_info(struct trace *trace,
289 					   struct perf_evsel *evsel,
290 					   struct perf_sample *sample)
291 {
292 	int id = perf_evsel__intval(evsel, sample, "id");
293 
294 	if (id < 0) {
295 		fprintf(trace->output, "Invalid syscall %d id, skipping...\n", id);
296 		return NULL;
297 	}
298 
299 	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
300 	    trace__read_syscall_info(trace, id))
301 		goto out_cant_read;
302 
303 	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
304 		goto out_cant_read;
305 
306 	return &trace->syscalls.table[id];
307 
308 out_cant_read:
309 	fprintf(trace->output, "Problems reading syscall %d", id);
310 	if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
311 		fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
312 	fputs(" information", trace->output);
313 	return NULL;
314 }
315 
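/*
 * raw_syscalls:sys_enter handler: format "name(args" into the per-thread
 * entry_str and mark it pending so that trace__sys_exit() can complete the
 * line.  exit and exit_group never return, so those are printed right away
 * unless a duration filter is in place.
 */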
316 static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
317 			    struct perf_sample *sample)
318 {
319 	char *msg;
320 	void *args;
321 	size_t printed = 0;
322 	struct thread *thread;
323 	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
324 	struct thread_trace *ttrace;
325 
326 	if (sc == NULL)
327 		return -1;
328 
329 	if (sc->filtered)
330 		return 0;
331 
332 	thread = machine__findnew_thread(&trace->host, sample->tid);
333 	ttrace = thread__trace(thread, trace->output);
334 	if (ttrace == NULL)
335 		return -1;
336 
337 	args = perf_evsel__rawptr(evsel, sample, "args");
338 	if (args == NULL) {
339 		fprintf(trace->output, "Problems reading syscall arguments\n");
340 		return -1;
341 	}
342 
343 	ttrace = thread->priv;
344 
345 	if (ttrace->entry_str == NULL) {
346 		ttrace->entry_str = malloc(1024);
347 		if (!ttrace->entry_str)
348 			return -1;
349 	}
350 
351 	ttrace->entry_time = sample->time;
352 	msg = ttrace->entry_str;
353 	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
354 
355 	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed,  args);
356 
357 	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
358 		if (!trace->duration_filter) {
359 			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
360 			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
361 		}
362 	} else
363 		ttrace->entry_pending = true;
364 
365 	return 0;
366 }
367 
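/*
 * raw_syscalls:sys_exit handler: compute the syscall duration, apply the
 * --duration filter, print the pending entry string (or a yellow
 * "continued" marker when the entry was not seen) and decode the return
 * value, using audit_errno_to_name()/strerror_r() for syscalls flagged
 * with ->errmsg and "Timeout" for zero returns on ->timeout syscalls.
 */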
368 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
369 			   struct perf_sample *sample)
370 {
371 	int ret;
372 	u64 duration = 0;
373 	struct thread *thread;
374 	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
375 	struct thread_trace *ttrace;
376 
377 	if (sc == NULL)
378 		return -1;
379 
380 	if (sc->filtered)
381 		return 0;
382 
383 	thread = machine__findnew_thread(&trace->host, sample->tid);
384 	ttrace = thread__trace(thread, trace->output);
385 	if (ttrace == NULL)
386 		return -1;
387 
388 	ret = perf_evsel__intval(evsel, sample, "ret");
389 
390 	ttrace = thread->priv;
391 
392 	ttrace->exit_time = sample->time;
393 
394 	if (ttrace->entry_time) {
395 		duration = sample->time - ttrace->entry_time;
396 		if (trace__filter_duration(trace, duration))
397 			goto out;
398 	} else if (trace->duration_filter)
399 		goto out;
400 
401 	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
402 
403 	if (ttrace->entry_pending) {
404 		fprintf(trace->output, "%-70s", ttrace->entry_str);
405 	} else {
406 		fprintf(trace->output, " ... [");
407 		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
408 		fprintf(trace->output, "]: %s()", sc->name);
409 	}
410 
411 	if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
412 		char bf[256];
413 		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
414 			   *e = audit_errno_to_name(-ret);
415 
416 		fprintf(trace->output, ") = -1 %s %s", e, emsg);
417 	} else if (ret == 0 && sc->fmt && sc->fmt->timeout)
418 		fprintf(trace->output, ") = 0 Timeout");
419 	else
420 		fprintf(trace->output, ") = %d", ret);
421 
422 	fputc('\n', trace->output);
423 out:
424 	ttrace->entry_pending = false;
425 
426 	return 0;
427 }
428 
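/*
 * sched:sched_stat_runtime handler (enabled with --sched): accumulate
 * per-thread and global runtime so the thread summary can report it; if no
 * per-thread state is available, dump the raw event fields instead.
 */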
429 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
430 				     struct perf_sample *sample)
431 {
432 	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
433 	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
434 	struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
435 	struct thread_trace *ttrace = thread__trace(thread, trace->output);
436 
437 	if (ttrace == NULL)
438 		goto out_dump;
439 
440 	ttrace->runtime_ms += runtime_ms;
441 	trace->runtime_ms += runtime_ms;
442 	return 0;
443 
444 out_dump:
445 	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
446 	       evsel->name,
447 	       perf_evsel__strval(evsel, sample, "comm"),
448 	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
449 	       runtime,
450 	       perf_evsel__intval(evsel, sample, "vruntime"));
451 	return 0;
452 }
453 
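/*
 * Main tracing loop: set up an evlist with the raw_syscalls enter/exit
 * tracepoints (plus sched_stat_runtime with --sched), optionally fork the
 * workload, mmap and enable the events, then consume samples until
 * SIGINT/SIGCHLD is received and no further events are pending.
 */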
454 static int trace__run(struct trace *trace, int argc, const char **argv)
455 {
456 	struct perf_evlist *evlist = perf_evlist__new();
457 	struct perf_evsel *evsel;
458 	int err = -1, i;
459 	unsigned long before;
460 	const bool forks = argc > 0;
461 
462 	if (evlist == NULL) {
463 		fprintf(trace->output, "Not enough memory to run!\n");
464 		goto out;
465 	}
466 
467 	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
468 	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
469 		fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
470 		goto out_delete_evlist;
471 	}
472 
473 	if (trace->sched &&
474 	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
475 				   trace__sched_stat_runtime)) {
476 		fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
477 		goto out_delete_evlist;
478 	}
479 
480 	err = perf_evlist__create_maps(evlist, &trace->opts.target);
481 	if (err < 0) {
482 		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
483 		goto out_delete_evlist;
484 	}
485 
486 	err = trace__symbols_init(trace, evlist);
487 	if (err < 0) {
488 		fprintf(trace->output, "Problems initializing symbol libraries!\n");
489 		goto out_delete_maps;
490 	}
491 
492 	perf_evlist__config(evlist, &trace->opts);
493 
494 	signal(SIGCHLD, sig_handler);
495 	signal(SIGINT, sig_handler);
496 
497 	if (forks) {
498 		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
499 						    argv, false, false);
500 		if (err < 0) {
501 			fprintf(trace->output, "Couldn't run the workload!\n");
502 			goto out_delete_maps;
503 		}
504 	}
505 
506 	err = perf_evlist__open(evlist);
507 	if (err < 0) {
508 		fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
509 		goto out_delete_maps;
510 	}
511 
512 	err = perf_evlist__mmap(evlist, UINT_MAX, false);
513 	if (err < 0) {
514 		fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
515 		goto out_close_evlist;
516 	}
517 
518 	perf_evlist__enable(evlist);
519 
520 	if (forks)
521 		perf_evlist__start_workload(evlist);
522 
523 	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
524 again:
525 	before = trace->nr_events;
526 
527 	for (i = 0; i < evlist->nr_mmaps; i++) {
528 		union perf_event *event;
529 
530 		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
531 			const u32 type = event->header.type;
532 			tracepoint_handler handler;
533 			struct perf_sample sample;
534 
535 			++trace->nr_events;
536 
537 			err = perf_evlist__parse_sample(evlist, event, &sample);
538 			if (err) {
539 				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
540 				continue;
541 			}
542 
543 			if (trace->base_time == 0)
544 				trace->base_time = sample.time;
545 
546 			if (type != PERF_RECORD_SAMPLE) {
547 				trace__process_event(trace, &trace->host, event);
548 				continue;
549 			}
550 
551 			evsel = perf_evlist__id2evsel(evlist, sample.id);
552 			if (evsel == NULL) {
553 				fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
554 				continue;
555 			}
556 
557 			if (sample.raw_data == NULL) {
558 				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
559 				       perf_evsel__name(evsel), sample.tid,
560 				       sample.cpu, sample.raw_size);
561 				continue;
562 			}
563 
564 			handler = evsel->handler.func;
565 			handler(trace, evsel, &sample);
566 		}
567 	}
568 
569 	if (trace->nr_events == before) {
570 		if (done)
571 			goto out_unmap_evlist;
572 
573 		poll(evlist->pollfd, evlist->nr_fds, -1);
574 	}
575 
576 	if (done)
577 		perf_evlist__disable(evlist);
578 
579 	goto again;
580 
581 out_unmap_evlist:
582 	perf_evlist__munmap(evlist);
583 out_close_evlist:
584 	perf_evlist__close(evlist);
585 out_delete_maps:
586 	perf_evlist__delete_maps(evlist);
587 out_delete_evlist:
588 	perf_evlist__delete(evlist);
589 out:
590 	return err;
591 }
592 
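/*
 * Per-thread summary: one line per traced thread with its event count, its
 * share of all events (color coded at the 5%, 25% and 50% thresholds) and
 * the runtime accumulated from sched_stat_runtime.
 */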
593 static size_t trace__fprintf_threads_header(FILE *fp)
594 {
595 	size_t printed;
596 
597 	printed  = fprintf(fp, "\n _____________________________________________________________________\n");
598 	printed += fprintf(fp," __)    Summary of events    (__\n\n");
599 	printed += fprintf(fp,"              [ task - pid ]     [ events ] [ ratio ]  [ runtime ]\n");
600 	printed += fprintf(fp," _____________________________________________________________________\n\n");
601 
602 	return printed;
603 }
604 
605 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
606 {
607 	size_t printed = trace__fprintf_threads_header(fp);
608 	struct rb_node *nd;
609 
610 	for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
611 		struct thread *thread = rb_entry(nd, struct thread, rb_node);
612 		struct thread_trace *ttrace = thread->priv;
613 		const char *color;
614 		double ratio;
615 
616 		if (ttrace == NULL)
617 			continue;
618 
619 		ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
620 
621 		color = PERF_COLOR_NORMAL;
622 		if (ratio > 50.0)
623 			color = PERF_COLOR_RED;
624 		else if (ratio > 25.0)
625 			color = PERF_COLOR_GREEN;
626 		else if (ratio > 5.0)
627 			color = PERF_COLOR_YELLOW;
628 
629 		printed += color_fprintf(fp, color, "%20s", thread->comm);
630 		printed += fprintf(fp, " - %-5d :%11lu   [", thread->tid, ttrace->nr_events);
631 		printed += color_fprintf(fp, color, "%5.1f%%", ratio);
632 		printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
633 	}
634 
635 	return printed;
636 }
637 
638 static int trace__set_duration(const struct option *opt, const char *str,
639 			       int unset __maybe_unused)
640 {
641 	struct trace *trace = opt->value;
642 
643 	trace->duration_filter = atof(str);
644 	return 0;
645 }
646 
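/*
 * -o/--output handling: if the requested file already exists and is not
 * empty, it is rotated to "<name>.old" before being reopened for writing.
 */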
647 static int trace__open_output(struct trace *trace, const char *filename)
648 {
649 	struct stat st;
650 
651 	if (!stat(filename, &st) && st.st_size) {
652 		char oldname[PATH_MAX];
653 
654 		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
655 		unlink(oldname);
656 		rename(filename, oldname);
657 	}
658 
659 	trace->output = fopen(filename, "w");
660 
661 	return trace->output == NULL ? -errno : 0;
662 }
663 
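/*
 * Entry point for 'perf trace': parse the options, redirect the output
 * with -o/--output, build the event qualifier list from -e/--expr,
 * validate the target, run the tracer and, when --sched was given and
 * tracing succeeded, print the per-thread summary.
 */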
664 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
665 {
666 	const char * const trace_usage[] = {
667 		"perf trace [<options>] [<command>]",
668 		"perf trace [<options>] -- <command> [<options>]",
669 		NULL
670 	};
671 	struct trace trace = {
672 		.audit_machine = audit_detect_machine(),
673 		.syscalls = {
674 			.max = -1,
675 		},
676 		.opts = {
677 			.target = {
678 				.uid	   = UINT_MAX,
679 				.uses_mmap = true,
680 			},
681 			.user_freq     = UINT_MAX,
682 			.user_interval = ULLONG_MAX,
683 			.no_delay      = true,
684 			.mmap_pages    = 1024,
685 		},
686 		.output = stdout,
687 	};
688 	const char *output_name = NULL;
689 	const char *ev_qualifier_str = NULL;
690 	const struct option trace_options[] = {
691 	OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
692 		    "list of events to trace"),
693 	OPT_STRING('o', "output", &output_name, "file", "output file name"),
694 	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
695 		    "trace events on existing process id"),
696 	OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
697 		    "trace events on existing thread id"),
698 	OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
699 		    "system-wide collection from all CPUs"),
700 	OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
701 		    "list of cpus to monitor"),
702 	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
703 		    "child tasks do not inherit counters"),
704 	OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
705 		     "number of mmap data pages"),
706 	OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
707 		   "user to profile"),
708 	OPT_CALLBACK(0, "duration", &trace, "float",
709 		     "show only events with duration > N.M ms",
710 		     trace__set_duration),
711 	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
712 	OPT_END()
713 	};
714 	int err;
715 	char bf[BUFSIZ];
716 
717 	argc = parse_options(argc, argv, trace_options, trace_usage, 0);
718 
719 	if (output_name != NULL) {
720 		err = trace__open_output(&trace, output_name);
721 		if (err < 0) {
722 			perror("failed to create output file");
723 			goto out;
724 		}
725 	}
726 
727 	if (ev_qualifier_str != NULL) {
728 		trace.ev_qualifier = strlist__new(true, ev_qualifier_str);
729 		if (trace.ev_qualifier == NULL) {
730 			fputs("Not enough memory to parse event qualifier",
731 			      trace.output);
732 			err = -ENOMEM;
733 			goto out_close;
734 		}
735 	}
736 
737 	err = perf_target__validate(&trace.opts.target);
738 	if (err) {
739 		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
740 		fprintf(trace.output, "%s", bf);
741 		goto out_close;
742 	}
743 
744 	err = perf_target__parse_uid(&trace.opts.target);
745 	if (err) {
746 		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
747 		fprintf(trace.output, "%s", bf);
748 		goto out_close;
749 	}
750 
751 	if (!argc && perf_target__none(&trace.opts.target))
752 		trace.opts.target.system_wide = true;
753 
754 	err = trace__run(&trace, argc, argv);
755 
756 	if (trace.sched && !err)
757 		trace__fprintf_thread_summary(&trace, trace.output);
758 
759 out_close:
760 	if (output_name != NULL)
761 		fclose(trace.output);
762 out:
763 	return err;
764 }
765