// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
 */

#include "builtin.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <poll.h>
#include <linux/capability.h>
#include <linux/string.h>

#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "util/cap.h"
#include "util/config.h"

#define DEFAULT_TRACER  "function_graph"

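/*
 * Collected command-line state: the tracer name, the target (pids/cpus)
 * and the filter lists that get written into the tracefs filter files
 * before tracing starts.
 */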
struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	int			graph_depth;
};

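/*
 * One function-filter pattern; 'name' is a flexible array member,
 * allocated together with the entry in parse_filter_func().
 */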
struct filter_entry {
	struct list_head	list;
	char			name[];
};

static volatile int workload_exec_errno;
static volatile bool done;	/* written from the signal handlers below */

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

/*
 * perf_evlist__prepare_workload() sends a SIGUSR1 if the fork fails,
 * because we requested it by setting its exec_error callback to
 * ftrace__workload_exec_failed_signal() below.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info,
						void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = true;
}

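/*
 * Write (or append) 'val' to the named file under the tracefs mount,
 * e.g. "current_tracer" or "set_ftrace_pid". Returns 0 on success.
 */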
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n', so that the value
	 * and the newline reach the kernel in a single write(); without
	 * the trailing newline the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	val_copy[size] = '\n';	/* overwrites strdup()'s NUL terminator */

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}
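
/*
 * These helpers are the programmatic equivalent of shell-level tracefs
 * writes, e.g.:
 *   # echo function > /sys/kernel/tracing/current_tracer
 * (or /sys/kernel/debug/tracing on older setups). append_tracing_file()
 * is the '>>' variant, used for files such as set_ftrace_pid that
 * accumulate values.
 */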

static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);

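/*
 * Put the whole ftrace state back to its defaults: tracing off, 'nop'
 * tracer, no pid/cpu restriction, unlimited graph depth, empty filters.
 */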
static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
	if (write_tracing_file("tracing_on", "0") < 0)
		return -1;

	if (write_tracing_file("current_tracer", "nop") < 0)
		return -1;

	if (write_tracing_file("set_ftrace_pid", " ") < 0)
		return -1;

	if (reset_tracing_cpu() < 0)
		return -1;

	if (write_tracing_file("max_graph_depth", "0") < 0)
		return -1;

	reset_tracing_filters();
	return 0;
}

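/*
 * Restrict tracing to the target threads by appending each tid to
 * set_ftrace_pid. Skipped for CPU-oriented targets, which are handled
 * via the cpumask instead.
 */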
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  ftrace->evlist->core.threads->map[i]);
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}

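/*
 * tracing_cpumask takes a hex bitmap, one nibble covering 4 CPUs, with
 * a ',' after every 32 CPUs; e.g. a 40-CPU all-set mask reads
 * "ff,ffffffff". The sizing below accounts for both.
 */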
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
	mask_size = last_cpu / 4 + 2; /* one more byte for the terminating NUL */
	mask_size += last_cpu / 32; /* a ',' separator after every 32 CPUs */

	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}

static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;

	if (!target__has_cpu(&ftrace->target))
		return 0;

	return set_tracing_cpumask(cpumap);
}

static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
	int ret;

	ret = set_tracing_cpumask(cpumap);
	perf_cpu_map__put(cpumap);
	return ret;
}

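/*
 * Append each accumulated pattern to one of the tracefs filter files.
 * ftrace accepts glob patterns here, so entries like "sched_*" work.
 */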
static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}

static int set_tracing_filters(struct perf_ftrace *ftrace)
{
	int ret;

	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
	if (ret < 0)
		return ret;

	/* older kernels do not have this filter, so ignore a failure here */
	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);

	return ret;
}

static void reset_tracing_filters(void)
{
	write_tracing_file("set_ftrace_filter", " ");
	write_tracing_file("set_ftrace_notrace", " ");
	write_tracing_file("set_graph_function", " ");
	write_tracing_file("set_graph_notrace", " ");
}

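/*
 * Cap the function_graph tracer at --graph-depth levels by writing to
 * max_graph_depth. A depth of 0 means the option was not given, so
 * nothing is written.
 */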
static int set_tracing_depth(struct perf_ftrace *ftrace)
{
	char buf[16];

	if (ftrace->graph_depth == 0)
		return 0;

	if (ftrace->graph_depth < 0) {
		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
		return -1;
	}

	snprintf(buf, sizeof(buf), "%d", ftrace->graph_depth);

	if (write_tracing_file("max_graph_depth", buf) < 0)
		return -1;

	return 0;
}

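/*
 * The main tracing loop: reset tracefs state, apply pid/cpu/filter/depth
 * settings, start the workload, then stream trace_pipe to stdout until
 * the workload exits or the user interrupts.
 */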
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
				&ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		goto out_reset;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		goto out_reset;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		goto out_reset;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		goto out_reset;
	}

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	if (write_tracing_file("tracing_on", "1") < 0) {
		pr_err("can't enable tracing\n");
		goto out_close_fd;
	}

	perf_evlist__start_workload(ftrace->evlist);

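	/*
	 * Stream trace_pipe until a signal (SIGINT/SIGCHLD/...) sets
	 * 'done'; the fd is non-blocking, so poll() for new data.
	 */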
	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so the error message below appears at the end */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}

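/*
 * Handle the "ftrace." section of perfconfig. Only the tracer is
 * configurable, e.g. in ~/.perfconfig:
 *
 *	[ftrace]
 *		tracer = function
 */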
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;

	if (strcmp(var, "ftrace.tracer"))
		return -1;

	if (!strcmp(value, "function_graph") ||
	    !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}

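/*
 * Option callback for -T/-N/-G/-g: store the function pattern on the
 * filter list that opt->value points to.
 */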
static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct list_head *head = opt->value;
	struct filter_entry *entry;

	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, head);

	return 0;
}

static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del_init(&pos->list);
		free(pos);
	}
}

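/*
 * Entry point for the subcommand, e.g.:
 *	perf ftrace -- ls
 *	perf ftrace -t function -p 1234
 */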
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "tracer to use: function_graph(default) or function"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "trace on existing process id"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "trace given functions only", parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "do not trace given functions", parse_filter_func),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Set graph filter on given functions", parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
		    "Max depth for function graph tracer"),
	OPT_END()
	};

	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}