xref: /linux/tools/perf/builtin-ftrace.c (revision 2a1ea59de83bf367215e2a4dd9bf8bbd061349b3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * builtin-ftrace.c
4  *
5  * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
6  * Copyright (c) 2020  Changbin Du <changbin.du@gmail.com>, significant enhancement.
7  */
8 
9 #include "builtin.h"
10 
11 #include <errno.h>
12 #include <unistd.h>
13 #include <signal.h>
14 #include <stdlib.h>
15 #include <fcntl.h>
16 #include <inttypes.h>
17 #include <math.h>
18 #include <poll.h>
19 #include <ctype.h>
20 #include <linux/capability.h>
21 #include <linux/string.h>
22 #include <sys/stat.h>
23 
24 #include "debug.h"
25 #include <subcmd/pager.h>
26 #include <subcmd/parse-options.h>
27 #include <api/io.h>
28 #include <api/fs/tracing_path.h>
29 #include "evlist.h"
30 #include "target.h"
31 #include "cpumap.h"
32 #include "hashmap.h"
33 #include "thread_map.h"
34 #include "strfilter.h"
35 #include "util/cap.h"
36 #include "util/config.h"
37 #include "util/ftrace.h"
38 #include "util/stat.h"
39 #include "util/units.h"
40 #include "util/parse-sublevel-options.h"
41 
#define DEFAULT_TRACER  "function_graph"

/* set from a signal handler when the forked workload fails to exec */
static volatile sig_atomic_t workload_exec_errno;
/* set by signal handlers; tells the main trace loops to stop */
static volatile sig_atomic_t done;

static struct stats latency_stats;  /* for tracepoints */

static char tracing_instance[PATH_MAX];	/* Trace instance directory */
50 
/* Generic signal handler: only sets a sig_atomic_t flag to stop the main loops. */
static void sig_handler(int sig __maybe_unused)
{
	done = true;
}
55 
56 /*
57  * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
58  * we asked by setting its exec_error to the function below,
59  * ftrace__workload_exec_failed_signal.
60  *
61  * XXX We need to handle this more appropriately, emitting an error, etc.
62  */
63 static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
64 						siginfo_t *info __maybe_unused,
65 						void *ucontext __maybe_unused)
66 {
67 	workload_exec_errno = info->si_value.sival_int;
68 	done = true;
69 }
70 
71 static bool check_ftrace_capable(void)
72 {
73 	bool used_root;
74 
75 	if (perf_cap__capable(CAP_PERFMON, &used_root))
76 		return true;
77 
78 	if (!used_root && perf_cap__capable(CAP_SYS_ADMIN, &used_root))
79 		return true;
80 
81 	pr_err("ftrace only works for %s!\n",
82 		used_root ? "root"
83 			  : "users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
84 		);
85 	return false;
86 }
87 
/* ftrace is usable iff the set_ftrace_pid control file exists. */
static bool is_ftrace_supported(void)
{
	bool ret;
	char *path = get_tracing_file("set_ftrace_pid");

	if (!path) {
		pr_debug("cannot get tracing file set_ftrace_pid\n");
		return false;
	}

	ret = access(path, F_OK) == 0;
	put_tracing_file(path);
	return ret;
}
105 
106 /*
107  * Wrapper to test if a file in directory .../tracing/instances/XXX
108  * exists. If so return the .../tracing/instances/XXX file for use.
109  * Otherwise the file exists only in directory .../tracing and
110  * is applicable to all instances, for example file available_filter_functions.
111  * Return that file name in this case.
112  *
113  * This functions works similar to get_tracing_file() and expects its caller
114  * to free the returned file name.
115  *
116  * The global variable tracing_instance is set in init_tracing_instance()
117  * called at the  beginning to a process specific tracing subdirectory.
118  */
119 static char *get_tracing_instance_file(const char *name)
120 {
121 	char *file;
122 
123 	if (asprintf(&file, "%s/%s", tracing_instance, name) < 0)
124 		return NULL;
125 
126 	if (!access(file, F_OK))
127 		return file;
128 
129 	free(file);
130 	file = get_tracing_file(name);
131 	return file;
132 }
133 
/*
 * Write @val to the tracing control file @name (resolved relative to the
 * current tracing instance).  The file is opened O_APPEND when @append is
 * set, O_TRUNC otherwise.  Returns 0 on success, -1 on failure.
 */
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_instance_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	/* strdup() allocated size + 1 bytes: the NUL slot becomes the '\n' */
	val_copy[size] = '\n';

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}
183 
/* Overwrite the tracing control file @name with @val. */
static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

/* Append @val to the tracing control file @name. */
static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}
193 
/*
 * Dump the contents of tracing file @name to stdout in 4 KiB chunks.
 * Returns 0 on success, -1 on failure.
 */
static int read_tracing_file_to_stdout(const char *name)
{
	char buf[4096];
	char *file;
	int fd;
	int ret = -1;

	file = get_tracing_instance_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, buf, sizeof(buf)));
		goto out;
	}

	/* read contents to stdout */
	while (true) {
		int n = read(fd, buf, sizeof(buf));
		if (n == 0)		/* EOF */
			break;
		else if (n < 0)		/* read error: report failure */
			goto out_close;

		if (fwrite(buf, n, 1, stdout) != 1)
			goto out_close;
	}
	ret = 0;

out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}
233 
/*
 * Read the tracing file @name line by line, invoking @cb(line, @cb_arg)
 * for each line (the line still contains its trailing newline).
 * Returns 0 on success, -1 if the file cannot be resolved or opened.
 */
static int read_tracing_file_by_line(const char *name,
				     void (*cb)(char *str, void *arg),
				     void *cb_arg)
{
	char *line = NULL;
	size_t len = 0;
	char *file;
	FILE *fp;

	file = get_tracing_instance_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fp = fopen(file, "r");
	if (fp == NULL) {
		pr_debug("cannot open tracing file: %s\n", name);
		put_tracing_file(file);
		return -1;
	}

	while (getline(&line, &len, fp) != -1)
		cb(line, cb_arg);

	/* free(NULL) is a no-op: no need for the old 'if (line)' guard */
	free(line);

	fclose(fp);
	put_tracing_file(file);
	return 0;
}
267 
/* Format @value as decimal and write it to the tracing file @name. */
static int write_tracing_file_int(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	return write_tracing_file(name, buf) < 0 ? -1 : 0;
}
278 
279 static int write_tracing_option_file(const char *name, const char *val)
280 {
281 	char *file;
282 	int ret;
283 
284 	if (asprintf(&file, "options/%s", name) < 0)
285 		return -1;
286 
287 	ret = __write_tracing_file(file, val, false);
288 	free(file);
289 	return ret;
290 }
291 
292 static int reset_tracing_cpu(void);
293 static void reset_tracing_filters(void);
294 
295 static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
296 {
297 	write_tracing_option_file("function-fork", "0");
298 	write_tracing_option_file("func_stack_trace", "0");
299 	write_tracing_option_file("sleep-time", "1");
300 	write_tracing_option_file("funcgraph-irqs", "1");
301 	write_tracing_option_file("funcgraph-proc", "0");
302 	write_tracing_option_file("funcgraph-abstime", "0");
303 	write_tracing_option_file("funcgraph-tail", "0");
304 	write_tracing_option_file("latency-format", "0");
305 	write_tracing_option_file("irq-info", "0");
306 }
307 
308 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
309 {
310 	if (write_tracing_file("tracing_on", "0") < 0)
311 		return -1;
312 
313 	if (write_tracing_file("current_tracer", "nop") < 0)
314 		return -1;
315 
316 	if (write_tracing_file("set_ftrace_pid", " ") < 0)
317 		return -1;
318 
319 	if (reset_tracing_cpu() < 0)
320 		return -1;
321 
322 	if (write_tracing_file("max_graph_depth", "0") < 0)
323 		return -1;
324 
325 	if (write_tracing_file("tracing_thresh", "0") < 0)
326 		return -1;
327 
328 	reset_tracing_filters();
329 	reset_tracing_options(ftrace);
330 	return 0;
331 }
332 
333 /* Remove .../tracing/instances/XXX subdirectory created with
334  * init_tracing_instance().
335  */
336 static void exit_tracing_instance(void)
337 {
338 	if (rmdir(tracing_instance))
339 		pr_err("failed to delete tracing/instances directory\n");
340 }
341 
342 /* Create subdirectory within .../tracing/instances/XXX to have session
343  * or process specific setup. To delete this setup, simply remove the
344  * subdirectory.
345  */
346 static int init_tracing_instance(void)
347 {
348 	char dirname[] = "instances/perf-ftrace-XXXXXX";
349 	char *path;
350 
351 	path = get_tracing_file(dirname);
352 	if (!path)
353 		goto error;
354 	strncpy(tracing_instance, path, sizeof(tracing_instance) - 1);
355 	put_tracing_file(path);
356 	path = mkdtemp(tracing_instance);
357 	if (!path)
358 		goto error;
359 	return 0;
360 
361 error:
362 	pr_err("failed to create tracing/instances directory\n");
363 	return -1;
364 }
365 
366 static int set_tracing_pid(struct perf_ftrace *ftrace)
367 {
368 	int i;
369 	char buf[16];
370 
371 	if (target__has_cpu(&ftrace->target))
372 		return 0;
373 
374 	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
375 		scnprintf(buf, sizeof(buf), "%d",
376 			  perf_thread_map__pid(ftrace->evlist->core.threads, i));
377 		if (append_tracing_file("set_ftrace_pid", buf) < 0)
378 			return -1;
379 	}
380 	return 0;
381 }
382 
/*
 * Format @cpumap as a hex mask string and write it to tracing_cpumask.
 * NOTE(review): sizing assumes the map's last entry is the highest cpu
 * number -- matches how perf_cpu_map is ordered; confirm if that changes.
 */
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
	/* one hex digit covers 4 cpus */
	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */

	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}
407 
408 static int set_tracing_cpu(struct perf_ftrace *ftrace)
409 {
410 	struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;
411 
412 	if (!target__has_cpu(&ftrace->target))
413 		return 0;
414 
415 	return set_tracing_cpumask(cpumap);
416 }
417 
418 static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
419 {
420 	if (!ftrace->func_stack_trace)
421 		return 0;
422 
423 	if (write_tracing_option_file("func_stack_trace", "1") < 0)
424 		return -1;
425 
426 	return 0;
427 }
428 
429 static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
430 {
431 	if (!ftrace->func_irq_info)
432 		return 0;
433 
434 	if (write_tracing_option_file("irq-info", "1") < 0)
435 		return -1;
436 
437 	return 0;
438 }
439 
/* Restore tracing_cpumask to cover all online cpus. */
static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *online = perf_cpu_map__new_online_cpus();
	int err = set_tracing_cpumask(online);

	perf_cpu_map__put(online);
	return err;
}
449 
450 static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
451 {
452 	struct filter_entry *pos;
453 
454 	list_for_each_entry(pos, funcs, list) {
455 		if (append_tracing_file(filter_file, pos->name) < 0)
456 			return -1;
457 	}
458 
459 	return 0;
460 }
461 
462 static int set_tracing_filters(struct perf_ftrace *ftrace)
463 {
464 	int ret;
465 
466 	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
467 	if (ret < 0)
468 		return ret;
469 
470 	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
471 	if (ret < 0)
472 		return ret;
473 
474 	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
475 	if (ret < 0)
476 		return ret;
477 
478 	/* old kernels do not have this filter */
479 	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);
480 
481 	return ret;
482 }
483 
/* Clear all function filter files (a single space empties the list). */
static void reset_tracing_filters(void)
{
	static const char * const filter_files[] = {
		"set_ftrace_filter",
		"set_ftrace_notrace",
		"set_graph_function",
		"set_graph_notrace",
	};
	size_t i;

	for (i = 0; i < sizeof(filter_files) / sizeof(filter_files[0]); i++)
		write_tracing_file(filter_files[i], " ");
}
491 
492 static int set_tracing_depth(struct perf_ftrace *ftrace)
493 {
494 	if (ftrace->graph_depth == 0)
495 		return 0;
496 
497 	if (ftrace->graph_depth < 0) {
498 		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
499 		return -1;
500 	}
501 
502 	if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
503 		return -1;
504 
505 	return 0;
506 }
507 
508 static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
509 {
510 	int ret;
511 
512 	if (ftrace->percpu_buffer_size == 0)
513 		return 0;
514 
515 	ret = write_tracing_file_int("buffer_size_kb",
516 				     ftrace->percpu_buffer_size / 1024);
517 	if (ret < 0)
518 		return ret;
519 
520 	return 0;
521 }
522 
523 static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
524 {
525 	if (!ftrace->inherit)
526 		return 0;
527 
528 	if (write_tracing_option_file("function-fork", "1") < 0)
529 		return -1;
530 
531 	return 0;
532 }
533 
534 static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
535 {
536 	if (!ftrace->graph_nosleep_time)
537 		return 0;
538 
539 	if (write_tracing_option_file("sleep-time", "0") < 0)
540 		return -1;
541 
542 	return 0;
543 }
544 
545 static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
546 {
547 	if (!ftrace->graph_noirqs)
548 		return 0;
549 
550 	if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
551 		return -1;
552 
553 	return 0;
554 }
555 
556 static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
557 {
558 	if (!ftrace->graph_verbose)
559 		return 0;
560 
561 	if (write_tracing_option_file("funcgraph-proc", "1") < 0)
562 		return -1;
563 
564 	if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
565 		return -1;
566 
567 	if (write_tracing_option_file("latency-format", "1") < 0)
568 		return -1;
569 
570 	return 0;
571 }
572 
573 static int set_tracing_funcgraph_tail(struct perf_ftrace *ftrace)
574 {
575 	if (!ftrace->graph_tail)
576 		return 0;
577 
578 	if (write_tracing_option_file("funcgraph-tail", "1") < 0)
579 		return -1;
580 
581 	return 0;
582 }
583 
584 static int set_tracing_thresh(struct perf_ftrace *ftrace)
585 {
586 	int ret;
587 
588 	if (ftrace->graph_thresh == 0)
589 		return 0;
590 
591 	ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
592 	if (ret < 0)
593 		return ret;
594 
595 	return 0;
596 }
597 
598 static int set_tracing_options(struct perf_ftrace *ftrace)
599 {
600 	if (set_tracing_pid(ftrace) < 0) {
601 		pr_err("failed to set ftrace pid\n");
602 		return -1;
603 	}
604 
605 	if (set_tracing_cpu(ftrace) < 0) {
606 		pr_err("failed to set tracing cpumask\n");
607 		return -1;
608 	}
609 
610 	if (set_tracing_func_stack_trace(ftrace) < 0) {
611 		pr_err("failed to set tracing option func_stack_trace\n");
612 		return -1;
613 	}
614 
615 	if (set_tracing_func_irqinfo(ftrace) < 0) {
616 		pr_err("failed to set tracing option irq-info\n");
617 		return -1;
618 	}
619 
620 	if (set_tracing_filters(ftrace) < 0) {
621 		pr_err("failed to set tracing filters\n");
622 		return -1;
623 	}
624 
625 	if (set_tracing_depth(ftrace) < 0) {
626 		pr_err("failed to set graph depth\n");
627 		return -1;
628 	}
629 
630 	if (set_tracing_percpu_buffer_size(ftrace) < 0) {
631 		pr_err("failed to set tracing per-cpu buffer size\n");
632 		return -1;
633 	}
634 
635 	if (set_tracing_trace_inherit(ftrace) < 0) {
636 		pr_err("failed to set tracing option function-fork\n");
637 		return -1;
638 	}
639 
640 	if (set_tracing_sleep_time(ftrace) < 0) {
641 		pr_err("failed to set tracing option sleep-time\n");
642 		return -1;
643 	}
644 
645 	if (set_tracing_funcgraph_irqs(ftrace) < 0) {
646 		pr_err("failed to set tracing option funcgraph-irqs\n");
647 		return -1;
648 	}
649 
650 	if (set_tracing_funcgraph_verbose(ftrace) < 0) {
651 		pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
652 		return -1;
653 	}
654 
655 	if (set_tracing_thresh(ftrace) < 0) {
656 		pr_err("failed to set tracing thresh\n");
657 		return -1;
658 	}
659 
660 	if (set_tracing_funcgraph_tail(ftrace) < 0) {
661 		pr_err("failed to set tracing option funcgraph-tail\n");
662 		return -1;
663 	}
664 
665 	return 0;
666 }
667 
668 static void select_tracer(struct perf_ftrace *ftrace)
669 {
670 	bool graph = !list_empty(&ftrace->graph_funcs) ||
671 		     !list_empty(&ftrace->nograph_funcs);
672 	bool func = !list_empty(&ftrace->filters) ||
673 		    !list_empty(&ftrace->notrace);
674 
675 	/* The function_graph has priority over function tracer. */
676 	if (graph)
677 		ftrace->tracer = "function_graph";
678 	else if (func)
679 		ftrace->tracer = "function";
680 	/* Otherwise, the default tracer is used. */
681 
682 	pr_debug("%s tracer is used\n", ftrace->tracer);
683 }
684 
/*
 * Main flow of "perf ftrace trace": create a tracing instance, configure
 * it, start the tracer, and stream trace_pipe to stdout until the
 * workload finishes or a signal sets 'done'.  Returns 0 on success.
 */
static int __cmd_ftrace(struct perf_ftrace *ftrace)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	select_tracer(ftrace);

	if (init_tracing_instance() < 0)
		goto out;

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out_reset;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out_reset;

	if (set_tracing_options(ftrace) < 0)
		goto out_reset;

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_instance_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	/* non-blocking reads so the poll() loop can react to 'done' */
	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	/* display column headers */
	read_tracing_file_to_stdout("trace");

	/* without --delay, enable tracing before launching the workload */
	if (!ftrace->target.initial_delay) {
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	evlist__start_workload(ftrace->evlist);

	/* with --delay, wait before turning tracing on */
	if (ftrace->target.initial_delay > 0) {
		usleep(ftrace->target.initial_delay * 1000);
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
			/* flush output since stdout is in full buffering mode due to pager */
			fflush(stdout);
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so below error msg appears at the end. */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	exit_tracing_instance();
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}
797 
/*
 * Parse function_graph output in @buf (@len bytes, not yet NUL
 * terminated) and account each function duration into @buckets and
 * latency_stats.  A partial trailing line is carried over between calls
 * in @linebuf, which the caller owns.
 * NOTE(review): assumes buf[len] is writable and linebuf is large enough
 * for a whole trace line -- verify at the call sites.
 */
static void make_histogram(struct perf_ftrace *ftrace, int buckets[],
			   char *buf, size_t len, char *linebuf)
{
	int min_latency = ftrace->min_latency;
	int max_latency = ftrace->max_latency;
	unsigned int bucket_num = ftrace->bucket_num;
	char *p, *q;
	char *unit;
	double num;
	int i;

	/* ensure NUL termination */
	buf[len] = '\0';

	/* handle data line by line */
	for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) {
		*q = '\0';
		/* move it to the line buffer */
		strcat(linebuf, p);

		/*
		 * parse trace output to get function duration like in
		 *
		 * # tracer: function_graph
		 * #
		 * # CPU  DURATION                  FUNCTION CALLS
		 * # |     |   |                     |   |   |   |
		 *  1) + 10.291 us   |  do_filp_open();
		 *  1)   4.889 us    |  do_filp_open();
		 *  1)   6.086 us    |  do_filp_open();
		 *
		 */
		if (linebuf[0] == '#')
			goto next;

		/* ignore CPU */
		p = strchr(linebuf, ')');
		if (p == NULL)
			p = linebuf;

		/* skip spaces and duration markers (like '+') up to the digits */
		while (*p && !isdigit(*p) && (*p != '|'))
			p++;

		/* no duration */
		if (*p == '\0' || *p == '|')
			goto next;

		num = strtod(p, &unit);
		if (!unit || strncmp(unit, " us", 3))
			goto next;

		/* the histogram works in nsec when --use-nsec is given */
		if (ftrace->use_nsec)
			num *= 1000;

		i = 0;
		/* everything below the minimum goes into bucket 0 */
		if (num < min_latency)
			goto do_inc;

		num -= min_latency;

		if (!ftrace->bucket_range) {
			/* log2 bucketing */
			i = log2(num);
			if (i < 0)
				i = 0;
		} else {
			// Less than 1 unit (ms or ns), or, in the future,
			// than the min latency desired.
			if (num > 0) // 1st entry: [ 1 unit .. bucket_range units ]
				i = num / ftrace->bucket_range + 1;
			if (num >= max_latency - min_latency)
				i = bucket_num -1;
		}
		/* clamp outliers into the last (overflow) bucket */
		if ((unsigned)i >= bucket_num)
			i = bucket_num - 1;

		/* restore the absolute value for the statistics */
		num += min_latency;
do_inc:
		buckets[i]++;
		update_stats(&latency_stats, num);

next:
		/* empty the line buffer for the next output  */
		linebuf[0] = '\0';
	}

	/* preserve any remaining output (before newline) */
	strcat(linebuf, p);
}
886 
/*
 * Print the latency histogram in @buckets plus summary statistics.
 * Bucket layout: [0] holds below-minimum latencies, [1..bucket_num-2]
 * the ranged buckets, [bucket_num-1] the overflow outliers.
 */
static void display_histogram(struct perf_ftrace *ftrace, int buckets[])
{
	int min_latency = ftrace->min_latency;
	bool use_nsec = ftrace->use_nsec;
	unsigned int bucket_num = ftrace->bucket_num;
	unsigned int i;
	int total = 0;
	int bar_total = 46;  /* to fit in 80 column */
	char bar[] = "###############################################";
	int bar_len;

	for (i = 0; i < bucket_num; i++)
		total += buckets[i];

	if (total == 0) {
		printf("No data found\n");
		return;
	}

	printf("# %14s | %10s | %-*s |\n",
	       "  DURATION    ", "COUNT", bar_total, "GRAPH");

	bar_len = buckets[0] * bar_total / total;

	/* first bucket: everything below the minimum latency */
	if (!ftrace->hide_empty || buckets[0])
		printf("  %4d - %4d %s | %10d | %.*s%*s |\n",
		       0, min_latency ?: 1, use_nsec ? "ns" : "us",
		       buckets[0], bar_len, bar, bar_total - bar_len, "");

	for (i = 1; i < bucket_num - 1; i++) {
		unsigned int start, stop;
		const char *unit = use_nsec ? "ns" : "us";

		if (ftrace->hide_empty && !buckets[i])
			continue;
		if (!ftrace->bucket_range) {
			/* power-of-2 bucket boundaries */
			start = (1 << (i - 1));
			stop  = 1 << i;

			/* scale to the next unit once values get large */
			if (start >= 1024) {
				start >>= 10;
				stop >>= 10;
				unit = use_nsec ? "us" : "ms";
			}
		} else {
			/* fixed-width buckets offset by the minimum latency */
			start = (i - 1) * ftrace->bucket_range + min_latency;
			stop  = i * ftrace->bucket_range + min_latency;

			if (start >= ftrace->max_latency)
				break;
			if (stop > ftrace->max_latency)
				stop = ftrace->max_latency;

			if (start >= 1000) {
				double dstart = start / 1000.0,
				       dstop  = stop / 1000.0;
				printf("  %4.2f - %-4.2f", dstart, dstop);
				unit = use_nsec ? "us" : "ms";
				goto print_bucket_info;
			}
		}

		printf("  %4d - %4d", start, stop);
print_bucket_info:
		bar_len = buckets[i] * bar_total / total;
		printf(" %s | %10d | %.*s%*s |\n", unit, buckets[i], bar_len, bar,
		       bar_total - bar_len, "");
	}

	/* last bucket: outliers above the maximum latency */
	bar_len = buckets[bucket_num - 1] * bar_total / total;
	if (ftrace->hide_empty && !buckets[bucket_num - 1])
		goto print_stats;
	if (!ftrace->bucket_range) {
		printf("  %4d - %-4s %s", 1, "...", use_nsec ? "ms" : "s ");
	} else {
		unsigned int upper_outlier = (bucket_num - 2) * ftrace->bucket_range + min_latency;
		if (upper_outlier > ftrace->max_latency)
			upper_outlier = ftrace->max_latency;

		if (upper_outlier >= 1000) {
			double dstart = upper_outlier / 1000.0;

			printf("  %4.2f - %-4s %s", dstart, "...", use_nsec ? "us" : "ms");
		} else {
			printf("  %4d - %4s %s", upper_outlier, "...", use_nsec ? "ns" : "us");
		}
	}
	printf(" | %10d | %.*s%*s |\n", buckets[bucket_num - 1],
	       bar_len, bar, bar_total - bar_len, "");

print_stats:
	printf("\n# statistics  (in %s)\n", ftrace->use_nsec ? "nsec" : "usec");
	printf("  total time: %20.0f\n", latency_stats.mean * latency_stats.n);
	printf("    avg time: %20.0f\n", latency_stats.mean);
	printf("    max time: %20"PRIu64"\n", latency_stats.max);
	printf("    min time: %20"PRIu64"\n", latency_stats.min);
	printf("       count: %20.0f\n", latency_stats.n);
}
985 
/*
 * Set up latency measurement: delegate to the BPF backend if requested,
 * otherwise create/configure a tracing instance with the function_graph
 * tracer.  Returns a read fd for trace_pipe on success, negative on
 * error (including a negative fd when open() fails).
 */
static int prepare_func_latency(struct perf_ftrace *ftrace)
{
	char *trace_file;
	int fd;

	if (ftrace->target.use_bpf)
		return perf_ftrace__latency_prepare_bpf(ftrace);

	if (init_tracing_instance() < 0)
		return -1;

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		return -1;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		return -1;

	if (set_tracing_options(ftrace) < 0)
		return -1;

	/* force to use the function_graph tracer to track duration */
	if (write_tracing_file("current_tracer", "function_graph") < 0) {
		pr_err("failed to set current_tracer to function_graph\n");
		return -1;
	}

	trace_file = get_tracing_instance_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		return -1;
	}

	fd = open(trace_file, O_RDONLY);
	if (fd < 0)
		pr_err("failed to open trace_pipe\n");

	init_stats(&latency_stats);

	put_tracing_file(trace_file);
	return fd;	/* may be negative if open() failed */
}
1030 
1031 static int start_func_latency(struct perf_ftrace *ftrace)
1032 {
1033 	if (ftrace->target.use_bpf)
1034 		return perf_ftrace__latency_start_bpf(ftrace);
1035 
1036 	if (write_tracing_file("tracing_on", "1") < 0) {
1037 		pr_err("can't enable tracing\n");
1038 		return -1;
1039 	}
1040 
1041 	return 0;
1042 }
1043 
1044 static int stop_func_latency(struct perf_ftrace *ftrace)
1045 {
1046 	if (ftrace->target.use_bpf)
1047 		return perf_ftrace__latency_stop_bpf(ftrace);
1048 
1049 	write_tracing_file("tracing_on", "0");
1050 	return 0;
1051 }
1052 
1053 static int read_func_latency(struct perf_ftrace *ftrace, int buckets[])
1054 {
1055 	if (ftrace->target.use_bpf)
1056 		return perf_ftrace__latency_read_bpf(ftrace, buckets, &latency_stats);
1057 
1058 	return 0;
1059 }
1060 
1061 static int cleanup_func_latency(struct perf_ftrace *ftrace)
1062 {
1063 	if (ftrace->target.use_bpf)
1064 		return perf_ftrace__latency_cleanup_bpf(ftrace);
1065 
1066 	exit_tracing_instance();
1067 	return 0;
1068 }
1069 
1070 static int __cmd_latency(struct perf_ftrace *ftrace)
1071 {
1072 	int trace_fd;
1073 	char buf[4096];
1074 	char line[256];
1075 	struct pollfd pollfd = {
1076 		.events = POLLIN,
1077 	};
1078 	int *buckets;
1079 
1080 	trace_fd = prepare_func_latency(ftrace);
1081 	if (trace_fd < 0)
1082 		goto out;
1083 
1084 	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
1085 	pollfd.fd = trace_fd;
1086 
1087 	if (start_func_latency(ftrace) < 0)
1088 		goto out;
1089 
1090 	evlist__start_workload(ftrace->evlist);
1091 
1092 	buckets = calloc(ftrace->bucket_num, sizeof(*buckets));
1093 	if (buckets == NULL) {
1094 		pr_err("failed to allocate memory for the buckets\n");
1095 		goto out;
1096 	}
1097 
1098 	line[0] = '\0';
1099 	while (!done) {
1100 		if (poll(&pollfd, 1, -1) < 0)
1101 			break;
1102 
1103 		if (pollfd.revents & POLLIN) {
1104 			int n = read(trace_fd, buf, sizeof(buf) - 1);
1105 			if (n < 0)
1106 				break;
1107 
1108 			make_histogram(ftrace, buckets, buf, n, line);
1109 		}
1110 	}
1111 
1112 	stop_func_latency(ftrace);
1113 
1114 	if (workload_exec_errno) {
1115 		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
1116 		pr_err("workload failed: %s\n", emsg);
1117 		goto out_free_buckets;
1118 	}
1119 
1120 	/* read remaining buffer contents */
1121 	while (!ftrace->target.use_bpf) {
1122 		int n = read(trace_fd, buf, sizeof(buf) - 1);
1123 		if (n <= 0)
1124 			break;
1125 		make_histogram(ftrace, buckets, buf, n, line);
1126 	}
1127 
1128 	read_func_latency(ftrace, buckets);
1129 
1130 	display_histogram(ftrace, buckets);
1131 
1132 out_free_buckets:
1133 	free(buckets);
1134 out:
1135 	close(trace_fd);
1136 	cleanup_func_latency(ftrace);
1137 
1138 	return (done && !workload_exec_errno) ? 0 : -1;
1139 }
1140 
/* hashmap callback: hash the function-name key */
static size_t profile_hash(long func, void *ctx __maybe_unused)
{
	return str_hash((char *)func);
}
1145 
1146 static bool profile_equal(long func1, long func2, void *ctx __maybe_unused)
1147 {
1148 	return !strcmp((char *)func1, (char *)func2);
1149 }
1150 
1151 static int prepare_func_profile(struct perf_ftrace *ftrace)
1152 {
1153 	ftrace->tracer = "function_graph";
1154 	ftrace->graph_tail = 1;
1155 	ftrace->graph_verbose = 0;
1156 
1157 	ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL);
1158 	if (ftrace->profile_hash == NULL)
1159 		return -ENOMEM;
1160 
1161 	return 0;
1162 }
1163 
/* This is saved in a hashmap keyed by the function name */
struct ftrace_profile_data {
	struct stats st;	/* per-function duration stats, in nsec */
};
1168 
1169 static int add_func_duration(struct perf_ftrace *ftrace, char *func, double time_ns)
1170 {
1171 	struct ftrace_profile_data *prof = NULL;
1172 
1173 	if (!hashmap__find(ftrace->profile_hash, func, &prof)) {
1174 		char *key = strdup(func);
1175 
1176 		if (key == NULL)
1177 			return -ENOMEM;
1178 
1179 		prof = zalloc(sizeof(*prof));
1180 		if (prof == NULL) {
1181 			free(key);
1182 			return -ENOMEM;
1183 		}
1184 
1185 		init_stats(&prof->st);
1186 		hashmap__add(ftrace->profile_hash, key, prof);
1187 	}
1188 
1189 	update_stats(&prof->st, time_ns);
1190 	return 0;
1191 }
1192 
1193 /*
1194  * The ftrace function_graph text output normally looks like below:
1195  *
1196  * CPU   DURATION       FUNCTION
1197  *
1198  *  0)               |  syscall_trace_enter.isra.0() {
1199  *  0)               |    __audit_syscall_entry() {
1200  *  0)               |      auditd_test_task() {
1201  *  0)   0.271 us    |        __rcu_read_lock();
1202  *  0)   0.275 us    |        __rcu_read_unlock();
1203  *  0)   1.254 us    |      } /\* auditd_test_task *\/
1204  *  0)   0.279 us    |      ktime_get_coarse_real_ts64();
1205  *  0)   2.227 us    |    } /\* __audit_syscall_entry *\/
1206  *  0)   2.713 us    |  } /\* syscall_trace_enter.isra.0 *\/
1207  *
1208  *  Parse the line and get the duration and function name.
1209  */
/*
 * Parse one line of function_graph output (see the format comment above),
 * extract the duration and function name and feed them to the profile
 * hashmap.  Lines without a usable duration are silently ignored
 * (return 0); only a malformed line (no '|' separator) is an error.
 */
static int parse_func_duration(struct perf_ftrace *ftrace, char *line, size_t len)
{
	char *p;
	char *func;
	double duration;

	/* skip CPU */
	p = strchr(line, ')');
	if (p == NULL)
		return 0;

	/* get duration */
	p = skip_spaces(p + 1);

	/* no duration? */
	if (p == NULL || *p == '|')
		return 0;

	/*
	 * skip markers like '*' or '!' for longer than ms
	 * NOTE(review): assumes a single marker character and that *p is
	 * non-negative when passed to isdigit() — confirm for this input.
	 */
	if (!isdigit(*p))
		p++;

	duration = strtod(p, &p);

	/* only "N.NNN us" durations are understood */
	if (strncmp(p, " us", 3)) {
		pr_debug("non-usec time found.. ignoring\n");
		return 0;
	}

	/*
	 * profile stat keeps the max and min values as integer,
	 * convert to nsec time so that we can have accurate max.
	 */
	duration *= 1000;

	/* skip to the pipe */
	while (p < line + len && *p != '|')
		p++;

	if (*p++ != '|')
		return -EINVAL;

	/* get function name */
	func = skip_spaces(p);

	/* skip the closing bracket and the start of comment: "} /\* " is 5 chars */
	if (*func == '}')
		func += 5;

	/* remove semi-colon or end of comment at the end (trim in place) */
	p = line + len - 1;
	while (!isalnum(*p) && *p != ']') {
		*p = '\0';
		--p;
	}

	return add_func_duration(ftrace, func, duration);
}
1268 
/* Sort keys for the profile output, selected with -s/--sort. */
enum perf_ftrace_profile_sort_key {
	PFP_SORT_TOTAL = 0,	/* total time: count * average */
	PFP_SORT_AVG,		/* average duration */
	PFP_SORT_MAX,		/* maximum duration */
	PFP_SORT_COUNT,		/* number of calls */
	PFP_SORT_NAME,		/* function name (the only ascending key) */
};

/* default sort key: total time, descending */
static enum perf_ftrace_profile_sort_key profile_sort = PFP_SORT_TOTAL;
1278 
1279 static int cmp_profile_data(const void *a, const void *b)
1280 {
1281 	const struct hashmap_entry *e1 = *(const struct hashmap_entry **)a;
1282 	const struct hashmap_entry *e2 = *(const struct hashmap_entry **)b;
1283 	struct ftrace_profile_data *p1 = e1->pvalue;
1284 	struct ftrace_profile_data *p2 = e2->pvalue;
1285 	double v1, v2;
1286 
1287 	switch (profile_sort) {
1288 	case PFP_SORT_NAME:
1289 		return strcmp(e1->pkey, e2->pkey);
1290 	case PFP_SORT_AVG:
1291 		v1 = p1->st.mean;
1292 		v2 = p2->st.mean;
1293 		break;
1294 	case PFP_SORT_MAX:
1295 		v1 = p1->st.max;
1296 		v2 = p2->st.max;
1297 		break;
1298 	case PFP_SORT_COUNT:
1299 		v1 = p1->st.n;
1300 		v2 = p2->st.n;
1301 		break;
1302 	case PFP_SORT_TOTAL:
1303 	default:
1304 		v1 = p1->st.n * p1->st.mean;
1305 		v2 = p2->st.n * p2->st.mean;
1306 		break;
1307 	}
1308 
1309 	if (v1 > v2)
1310 		return -1;
1311 	if (v1 < v2)
1312 		return 1;
1313 	return 0;
1314 }
1315 
1316 static void print_profile_result(struct perf_ftrace *ftrace)
1317 {
1318 	struct hashmap_entry *entry, **profile;
1319 	size_t i, nr, bkt;
1320 
1321 	nr = hashmap__size(ftrace->profile_hash);
1322 	if (nr == 0)
1323 		return;
1324 
1325 	profile = calloc(nr, sizeof(*profile));
1326 	if (profile == NULL) {
1327 		pr_err("failed to allocate memory for the result\n");
1328 		return;
1329 	}
1330 
1331 	i = 0;
1332 	hashmap__for_each_entry(ftrace->profile_hash, entry, bkt)
1333 		profile[i++] = entry;
1334 
1335 	assert(i == nr);
1336 
1337 	//cmp_profile_data(profile[0], profile[1]);
1338 	qsort(profile, nr, sizeof(*profile), cmp_profile_data);
1339 
1340 	printf("# %10s %10s %10s %10s   %s\n",
1341 	       "Total (us)", "Avg (us)", "Max (us)", "Count", "Function");
1342 
1343 	for (i = 0; i < nr; i++) {
1344 		const char *name = profile[i]->pkey;
1345 		struct ftrace_profile_data *p = profile[i]->pvalue;
1346 
1347 		printf("%12.3f %10.3f %6"PRIu64".%03"PRIu64" %10.0f   %s\n",
1348 		       p->st.n * p->st.mean / 1000, p->st.mean / 1000,
1349 		       p->st.max / 1000, p->st.max % 1000, p->st.n, name);
1350 	}
1351 
1352 	free(profile);
1353 
1354 	hashmap__for_each_entry(ftrace->profile_hash, entry, bkt) {
1355 		free((char *)entry->pkey);
1356 		free(entry->pvalue);
1357 	}
1358 
1359 	hashmap__free(ftrace->profile_hash);
1360 	ftrace->profile_hash = NULL;
1361 }
1362 
/*
 * Implementation of 'perf ftrace profile': set up a function_graph
 * tracing session in a private instance, run the workload while parsing
 * trace_pipe lines into per-function stats, then print the sorted result.
 * Cleanup is via the usual goto-chain; returns 0 only when tracing ended
 * normally (done set by a signal) without a workload error.
 */
static int __cmd_profile(struct perf_ftrace *ftrace)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct io io;
	char *line = NULL;
	size_t line_len = 0;

	if (prepare_func_profile(ftrace) < 0) {
		pr_err("failed to prepare func profiler\n");
		goto out;
	}

	if (init_tracing_instance() < 0)
		goto out;

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out_reset;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out_reset;

	if (set_tracing_options(ftrace) < 0)
		goto out_reset;

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_instance_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	/* non-blocking reads so the loop can notice 'done' */
	fcntl(trace_fd, F_SETFL, O_NONBLOCK);

	if (write_tracing_file("tracing_on", "1") < 0) {
		pr_err("can't enable tracing\n");
		goto out_close_fd;
	}

	evlist__start_workload(ftrace->evlist);

	io__init(&io, trace_fd, buf, sizeof(buf));
	io.timeout_ms = -1;	/* block indefinitely while the workload runs */

	while (!done && !io.eof) {
		if (io__getline(&io, &line, &line_len) < 0)
			break;

		if (parse_func_duration(ftrace, line, line_len) < 0)
			break;
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so below error msg appears at the end. */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		/*
		 * NOTE(review): this path skips print_profile_result(), which
		 * is what frees profile_hash — the map leaks here; confirm
		 * whether that is acceptable for an error exit.
		 */
		goto out_free_line;
	}

	/* read remaining buffer contents */
	io.timeout_ms = 0;
	while (!io.eof) {
		if (io__getline(&io, &line, &line_len) < 0)
			break;

		if (parse_func_duration(ftrace, line, line_len) < 0)
			break;
	}

	print_profile_result(ftrace);

out_free_line:
	free(line);
out_close_fd:
	close(trace_fd);
out_reset:
	exit_tracing_instance();
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}
1465 
1466 static int perf_ftrace_config(const char *var, const char *value, void *cb)
1467 {
1468 	struct perf_ftrace *ftrace = cb;
1469 
1470 	if (!strstarts(var, "ftrace."))
1471 		return 0;
1472 
1473 	if (strcmp(var, "ftrace.tracer"))
1474 		return -1;
1475 
1476 	if (!strcmp(value, "function_graph") ||
1477 	    !strcmp(value, "function")) {
1478 		ftrace->tracer = value;
1479 		return 0;
1480 	}
1481 
1482 	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
1483 	return -1;
1484 }
1485 
/*
 * read_tracing_file_by_line() callback: print each available function
 * that matches the user-supplied filter.
 */
static void list_function_cb(char *str, void *arg)
{
	struct strfilter *filter = arg;

	if (strfilter__compare(filter, str))
		printf("%s", str);
}
1493 
/*
 * Option callback for -F/--funcs: list functions from
 * available_filter_functions that match the given filter pattern, then
 * exit(0) — this option never returns to normal option processing on
 * success.
 */
static int opt_list_avail_functions(const struct option *opt __maybe_unused,
				    const char *str, int unset)
{
	struct strfilter *filter;
	const char *err = NULL;
	int ret;

	if (unset || !str)
		return -1;

	filter = strfilter__new(str, &err);
	if (!filter)
		return err ? -EINVAL : -ENOMEM;

	/*
	 * NOTE(review): str was already parsed by strfilter__new() above;
	 * OR-ing the same string again looks redundant — confirm intent.
	 */
	ret = strfilter__or(filter, str, &err);
	if (ret == -EINVAL) {
		/* err points into str at the offending character */
		pr_err("Filter parse error at %td.\n", err - str + 1);
		pr_err("Source: \"%s\"\n", str);
		pr_err("         %*c\n", (int)(err - str + 1), '^');
		strfilter__delete(filter);
		return ret;
	}

	ret = read_tracing_file_by_line("available_filter_functions",
					list_function_cb, filter);
	strfilter__delete(filter);
	if (ret < 0)
		return ret;

	exit(0);
}
1525 
1526 static int parse_filter_func(const struct option *opt, const char *str,
1527 			     int unset __maybe_unused)
1528 {
1529 	struct list_head *head = opt->value;
1530 	struct filter_entry *entry;
1531 
1532 	entry = malloc(sizeof(*entry) + strlen(str) + 1);
1533 	if (entry == NULL)
1534 		return -ENOMEM;
1535 
1536 	strcpy(entry->name, str);
1537 	list_add_tail(&entry->list, head);
1538 
1539 	return 0;
1540 }
1541 
1542 static void delete_filter_func(struct list_head *head)
1543 {
1544 	struct filter_entry *pos, *tmp;
1545 
1546 	list_for_each_entry_safe(pos, tmp, head, list) {
1547 		list_del_init(&pos->list);
1548 		free(pos);
1549 	}
1550 }
1551 
1552 static int parse_buffer_size(const struct option *opt,
1553 			     const char *str, int unset)
1554 {
1555 	unsigned long *s = (unsigned long *)opt->value;
1556 	static struct parse_tag tags_size[] = {
1557 		{ .tag  = 'B', .mult = 1       },
1558 		{ .tag  = 'K', .mult = 1 << 10 },
1559 		{ .tag  = 'M', .mult = 1 << 20 },
1560 		{ .tag  = 'G', .mult = 1 << 30 },
1561 		{ .tag  = 0 },
1562 	};
1563 	unsigned long val;
1564 
1565 	if (unset) {
1566 		*s = 0;
1567 		return 0;
1568 	}
1569 
1570 	val = parse_tag_value(str, tags_size);
1571 	if (val != (unsigned long) -1) {
1572 		if (val < 1024) {
1573 			pr_err("buffer size too small, must larger than 1KB.");
1574 			return -1;
1575 		}
1576 		*s = val;
1577 		return 0;
1578 	}
1579 
1580 	return -1;
1581 }
1582 
1583 static int parse_func_tracer_opts(const struct option *opt,
1584 				  const char *str, int unset)
1585 {
1586 	int ret;
1587 	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
1588 	struct sublevel_option func_tracer_opts[] = {
1589 		{ .name = "call-graph",	.value_ptr = &ftrace->func_stack_trace },
1590 		{ .name = "irq-info",	.value_ptr = &ftrace->func_irq_info },
1591 		{ .name = NULL, }
1592 	};
1593 
1594 	if (unset)
1595 		return 0;
1596 
1597 	ret = perf_parse_sublevel_options(str, func_tracer_opts);
1598 	if (ret)
1599 		return ret;
1600 
1601 	return 0;
1602 }
1603 
1604 static int parse_graph_tracer_opts(const struct option *opt,
1605 				  const char *str, int unset)
1606 {
1607 	int ret;
1608 	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
1609 	struct sublevel_option graph_tracer_opts[] = {
1610 		{ .name = "nosleep-time",	.value_ptr = &ftrace->graph_nosleep_time },
1611 		{ .name = "noirqs",		.value_ptr = &ftrace->graph_noirqs },
1612 		{ .name = "verbose",		.value_ptr = &ftrace->graph_verbose },
1613 		{ .name = "thresh",		.value_ptr = &ftrace->graph_thresh },
1614 		{ .name = "depth",		.value_ptr = &ftrace->graph_depth },
1615 		{ .name = "tail",		.value_ptr = &ftrace->graph_tail },
1616 		{ .name = NULL, }
1617 	};
1618 
1619 	if (unset)
1620 		return 0;
1621 
1622 	ret = perf_parse_sublevel_options(str, graph_tracer_opts);
1623 	if (ret)
1624 		return ret;
1625 
1626 	return 0;
1627 }
1628 
1629 static int parse_sort_key(const struct option *opt, const char *str, int unset)
1630 {
1631 	enum perf_ftrace_profile_sort_key *key = (void *)opt->value;
1632 
1633 	if (unset)
1634 		return 0;
1635 
1636 	if (!strcmp(str, "total"))
1637 		*key = PFP_SORT_TOTAL;
1638 	else if (!strcmp(str, "avg"))
1639 		*key = PFP_SORT_AVG;
1640 	else if (!strcmp(str, "max"))
1641 		*key = PFP_SORT_MAX;
1642 	else if (!strcmp(str, "count"))
1643 		*key = PFP_SORT_COUNT;
1644 	else if (!strcmp(str, "name"))
1645 		*key = PFP_SORT_NAME;
1646 	else {
1647 		pr_err("Unknown sort key: %s\n", str);
1648 		return -1;
1649 	}
1650 	return 0;
1651 }
1652 
/* Subcommands of 'perf ftrace'; NONE falls back to TRACE for compatibility. */
enum perf_ftrace_subcommand {
	PERF_FTRACE_NONE,	/* no explicit subcommand given */
	PERF_FTRACE_TRACE,	/* 'perf ftrace trace' (default) */
	PERF_FTRACE_LATENCY,	/* 'perf ftrace latency' */
	PERF_FTRACE_PROFILE,	/* 'perf ftrace profile' */
};
1659 
/*
 * Entry point of 'perf ftrace'.  Selects the subcommand (trace, latency
 * or profile), parses its option table, validates latency bucket
 * parameters where needed, sets up the target/workload and dispatches
 * to the subcommand implementation.
 */
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	int (*cmd_func)(struct perf_ftrace *) = NULL;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const struct option common_options[] = {
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "Trace on existing process id"),
	/* TODO: Add short option -t after -t/--tracer can be removed. */
	OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_INCR('v', "verbose", &verbose,
		 "Be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		    "List of cpus to monitor"),
	OPT_END()
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "Tracer to use: function_graph(default) or function"),
	OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
			     "Show available functions to filter",
			     opt_list_avail_functions, "*"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "Do not trace given functions", parse_filter_func),
	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
		     "Function tracer options, available options: call-graph,irq-info",
		     parse_func_tracer_opts),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
		     parse_graph_tracer_opts),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
		    "Trace children processes"),
	OPT_INTEGER('D', "delay", &ftrace.target.initial_delay,
		    "Number of milliseconds to wait before starting tracing after program start"),
	OPT_PARENT(common_options),
	};
	const struct option latency_options[] = {
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Show latency of given function", parse_filter_func),
#ifdef HAVE_BPF_SKEL
	OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf,
		    "Use BPF to measure function latency"),
#endif
	OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec,
		    "Use nano-second histogram"),
	OPT_UINTEGER(0, "bucket-range", &ftrace.bucket_range,
		    "Bucket range in ms or ns (-n/--use-nsec), default is log2() mode"),
	OPT_UINTEGER(0, "min-latency", &ftrace.min_latency,
		    "Minimum latency (1st bucket). Works only with --bucket-range."),
	OPT_UINTEGER(0, "max-latency", &ftrace.max_latency,
		    "Maximum latency (last bucket). Works only with --bucket-range."),
	OPT_BOOLEAN(0, "hide-empty", &ftrace.hide_empty,
		    "Hide empty buckets in the histogram"),
	OPT_PARENT(common_options),
	};
	const struct option profile_options[] = {
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "Do not trace given functions", parse_filter_func),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
	OPT_CALLBACK('s', "sort", &profile_sort, "key",
		     "Sort result by key: total (default), avg, max, count, name.",
		     parse_sort_key),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "Graph tracer options, available options: nosleep-time,noirqs,thresh=<n>,depth=<n>",
		     parse_graph_tracer_opts),
	OPT_PARENT(common_options),
	};
	const struct option *options = ftrace_options;

	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- [<command>] [<options>]",
		"perf ftrace {trace|latency|profile} [<options>] [<command>]",
		"perf ftrace {trace|latency|profile} [<options>] -- [<command>] [<options>]",
		NULL
	};
	enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE;

	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	/* all handled signals just set 'done' to stop the trace loop */
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (!check_ftrace_capable())
		return -1;

	if (!is_ftrace_supported()) {
		pr_err("ftrace is not supported on this system\n");
		return -ENOTSUP;
	}

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	/* peel off an explicit subcommand name before option parsing */
	if (argc > 1) {
		if (!strcmp(argv[1], "trace")) {
			subcmd = PERF_FTRACE_TRACE;
		} else if (!strcmp(argv[1], "latency")) {
			subcmd = PERF_FTRACE_LATENCY;
			options = latency_options;
		} else if (!strcmp(argv[1], "profile")) {
			subcmd = PERF_FTRACE_PROFILE;
			options = profile_options;
		}

		if (subcmd != PERF_FTRACE_NONE) {
			argc--;
			argv++;
		}
	}
	/* for backward compatibility */
	if (subcmd == PERF_FTRACE_NONE)
		subcmd = PERF_FTRACE_TRACE;

	argc = parse_options(argc, argv, options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (argc < 0) {
		ret = -EINVAL;
		goto out_delete_filters;
	}

	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;

	switch (subcmd) {
	case PERF_FTRACE_TRACE:
		cmd_func = __cmd_ftrace;
		break;
	case PERF_FTRACE_LATENCY:
		if (list_empty(&ftrace.filters)) {
			pr_err("Should provide a function to measure\n");
			parse_options_usage(ftrace_usage, options, "T", 1);
			ret = -EINVAL;
			goto out_delete_filters;
		}
		/* --min/--max-latency only make sense with a bucket range */
		if (!ftrace.bucket_range && ftrace.min_latency) {
			pr_err("--min-latency works only with --bucket-range\n");
			parse_options_usage(ftrace_usage, options,
					    "min-latency", /*short_opt=*/false);
			ret = -EINVAL;
			goto out_delete_filters;
		}
		if (ftrace.bucket_range && !ftrace.min_latency) {
			/* default min latency should be the bucket range */
			ftrace.min_latency = ftrace.bucket_range;
		}
		if (!ftrace.bucket_range && ftrace.max_latency) {
			pr_err("--max-latency works only with --bucket-range\n");
			parse_options_usage(ftrace_usage, options,
					    "max-latency", /*short_opt=*/false);
			ret = -EINVAL;
			goto out_delete_filters;
		}
		if (ftrace.bucket_range && ftrace.max_latency &&
		    ftrace.max_latency < ftrace.min_latency + ftrace.bucket_range) {
			/* we need at least 1 bucket excluding min and max buckets */
			pr_err("--max-latency must be larger than min-latency + bucket-range\n");
			parse_options_usage(ftrace_usage, options,
					    "max-latency", /*short_opt=*/false);
			ret = -EINVAL;
			goto out_delete_filters;
		}
		/* set default unless max_latency is set and valid */
		ftrace.bucket_num = NUM_BUCKET;
		if (ftrace.bucket_range) {
			if (ftrace.max_latency)
				ftrace.bucket_num = (ftrace.max_latency - ftrace.min_latency) /
							ftrace.bucket_range + 2;
			else
				/* default max latency should depend on bucket range and num_buckets */
				ftrace.max_latency = (NUM_BUCKET - 2) * ftrace.bucket_range +
							ftrace.min_latency;
		}
		cmd_func = __cmd_latency;
		break;
	case PERF_FTRACE_PROFILE:
		cmd_func = __cmd_profile;
		break;
	case PERF_FTRACE_NONE:
	default:
		pr_err("Invalid subcommand\n");
		ret = -EINVAL;
		goto out_delete_filters;
	}

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	/* remaining arguments form the workload command to trace */
	if (argc) {
		ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target,
					       argv, false,
					       ftrace__workload_exec_failed_signal);
		if (ret < 0)
			goto out_delete_evlist;
	}

	ret = cmd_func(&ftrace);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}
1917