xref: /linux/kernel/trace/trace_functions_graph.c (revision 0771cee974607ffcf19ff6022f971865db8e0b4a)
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When nonzero, irq functions may be ignored */
static int ftrace_graph_skip_irqs;

/* Do not record function time when the task is sleeping */
int fgraph_no_sleep_time;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_ent_args {
	struct ftrace_graph_ent_entry	ent;
	/* Force args[] to hold FTRACE_REGS_MAX_ARGS entries */
	unsigned long			args[FTRACE_REGS_MAX_ARGS];
};

struct fgraph_retaddr_ent_args {
	struct fgraph_retaddr_ent_entry	ent;
	/* Force args[] to hold FTRACE_REGS_MAX_ARGS entries */
	unsigned long			args[FTRACE_REGS_MAX_ARGS];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve the last processed entry. */
	union {
		struct fgraph_ent_args		ent;
		struct fgraph_retaddr_ent_args	rent;
	};
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

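/* Spaces of indentation per nesting level in the call-graph output */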
#define TRACE_GRAPH_INDENT	2

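/* Maximum call depth to trace; 0 means no limit (set via <tracefs>/max_graph_depth) */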
unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	/* Display function return value ? */
	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
	/* Display function return value in hexadecimal format ? */
	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
#endif
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	/* Display function return address ? */
	{ TRACER_OPT(funcgraph-retaddr, TRACE_GRAPH_PRINT_RETADDR) },
#endif
#ifdef CONFIG_FUNCTION_TRACE_ARGS
	/* Display function arguments ? */
	{ TRACER_OPT(funcgraph-args, TRACE_GRAPH_ARGS) },
#endif
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME,
	.opts = trace_opts
};
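
/*
 * These options are toggled at runtime through the tracefs interface,
 * e.g. "echo 1 > options/funcgraph-proc" or "echo funcgraph-proc >
 * trace_options" (tracefs is typically mounted at /sys/kernel/tracing).
 */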

static bool tracer_flags_is_set(struct trace_array *tr, u32 flags)
{
	return (tr->current_trace_flags->val & flags) == flags;
}

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

static int __graph_entry(struct trace_array *tr, struct ftrace_graph_ent *trace,
			 unsigned int trace_ctx, struct ftrace_regs *fregs)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;
	int size;

	/* If fregs is defined, add space for FTRACE_REGS_MAX_ARGS long-sized words */
	size = sizeof(*entry) + (FTRACE_REGS_MAX_ARGS * !!fregs * sizeof(long));

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, size, trace_ctx);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;

#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	if (fregs) {
		for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++)
			entry->args[i] = ftrace_regs_get_argument(fregs, i);
	}
#endif

	trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx)
{
	return __graph_entry(tr, trace, trace_ctx, NULL);
}

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
int __trace_graph_retaddr_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx,
				unsigned long retaddr,
				struct ftrace_regs *fregs)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct fgraph_retaddr_ent_entry *entry;
	int size;

	/* If fregs is defined, add space for FTRACE_REGS_MAX_ARGS long-sized words */
	size = sizeof(*entry) + (FTRACE_REGS_MAX_ARGS * !!fregs * sizeof(long));

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RETADDR_ENT,
					  size, trace_ctx);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_rent.ent = *trace;
	entry->graph_rent.retaddr = retaddr;

#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	if (fregs) {
		for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++)
			entry->args[i] = ftrace_regs_get_argument(fregs, i);
	}
#endif

	trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}
#else
int __trace_graph_retaddr_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx,
				unsigned long retaddr,
				struct ftrace_regs *fregs)
{
	return 1;
}
#endif

static inline int ftrace_graph_ignore_irqs(struct trace_array *tr)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	if (tracer_flags_is_set(tr, TRACE_GRAPH_PRINT_IRQS))
		return 0;

	return in_hardirq();
}

struct fgraph_times {
	unsigned long long		calltime;
	unsigned long long		sleeptime; /* may be optional! */
};

static int graph_entry(struct ftrace_graph_ent *trace,
		       struct fgraph_ops *gops,
		       struct ftrace_regs *fregs)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct fgraph_times *ftimes;
	unsigned int trace_ctx;
	int ret = 0;

	if (*task_var & TRACE_GRAPH_NOTRACE)
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		*task_var |= TRACE_GRAPH_NOTRACE;
		/*
		 * Need to return 1 to have the return handler called,
		 * which will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;

	if (ftrace_graph_ignore_irqs(tr))
		return 0;

	if (fgraph_no_sleep_time &&
	    !tracer_flags_is_set(tr, TRACE_GRAPH_SLEEP_TIME)) {
		ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
		if (ftimes)
			ftimes->sleeptime = current->ftrace_sleeptime;
	} else {
		/* Only need to record the calltime */
		ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
	}
	if (!ftimes)
		return 0;

	ftimes->calltime = trace_clock_local();

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	trace_ctx = tracing_gen_ctx();
	if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
	    tracer_flags_is_set(tr, TRACE_GRAPH_PRINT_RETADDR)) {
		unsigned long retaddr = ftrace_graph_top_ret_addr(current);
		ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx,
						  retaddr, fregs);
	} else {
		ret = __graph_entry(tr, trace, trace_ctx, fregs);
	}

	return ret;
}

int trace_graph_entry(struct ftrace_graph_ent *trace,
		      struct fgraph_ops *gops,
		      struct ftrace_regs *fregs)
{
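	/* Entry handler used when arguments are not recorded: pass NULL, not fregs */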
	return graph_entry(trace, gops, NULL);
}

static int trace_graph_entry_args(struct ftrace_graph_ent *trace,
				  struct fgraph_ops *gops,
				  struct ftrace_regs *fregs)
{
	return graph_entry(trace, gops, fregs);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned int trace_ctx)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
	};

	__trace_graph_entry(tr, &ent, trace_ctx);
	__trace_graph_return(tr, &ret, trace_ctx, time, time);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned int trace_ctx,
			  u64 calltime, u64 rettime)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	entry->calltime				= calltime;
	entry->rettime				= rettime;
	trace_buffer_unlock_commit_nostack(buffer, event);
}

static void handle_nosleeptime(struct trace_array *tr,
			       struct ftrace_graph_ret *trace,
			       struct fgraph_times *ftimes,
			       int size)
{
	if (size < sizeof(*ftimes))
		return;

	if (!fgraph_no_sleep_time || tracer_flags_is_set(tr, TRACE_GRAPH_SLEEP_TIME))
		return;

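	/* Push calltime forward by the time slept so the reported duration excludes sleep */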
	ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
}

void trace_graph_return(struct ftrace_graph_ret *trace,
			struct fgraph_ops *gops, struct ftrace_regs *fregs)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct fgraph_times *ftimes;
	unsigned int trace_ctx;
	u64 calltime, rettime;
	int size;

	rettime = trace_clock_local();

	ftrace_graph_addr_finish(gops, trace);

	if (*task_var & TRACE_GRAPH_NOTRACE) {
		*task_var &= ~TRACE_GRAPH_NOTRACE;
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(tr, trace, ftimes, size);

	calltime = ftimes->calltime;

	trace_ctx = tracing_gen_ctx();
	__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
				      struct fgraph_ops *gops,
				      struct ftrace_regs *fregs)
{
	struct fgraph_times *ftimes;
	struct trace_array *tr;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	tr = gops->private;
	handle_nosleeptime(tr, trace, ftimes, size);

	if (tracing_thresh &&
	    (trace_clock_local() - ftimes->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace, gops, fregs);
}

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	struct fgraph_ops *gops;

	gops = kzalloc(sizeof(*gops), GFP_KERNEL);
	if (!gops)
		return -ENOMEM;

	gops->entryfunc = &trace_graph_entry;
	gops->retfunc = &trace_graph_return;

	tr->gops = gops;
	gops->private = tr;

	fgraph_init_ops(&gops->ops, ops);

	return 0;
}

void free_fgraph_ops(struct trace_array *tr)
{
	kfree(tr->gops);
}

__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	tr->gops = &funcgraph_ops;
	funcgraph_ops.private = tr;
	fgraph_init_ops(&tr->gops->ops, ops);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	if (tracer_flags_is_set(tr, TRACE_GRAPH_ARGS))
		tr->gops->entryfunc = trace_graph_entry_args;
	else
		tr->gops->entryfunc = trace_graph_entry;

	if (tracing_thresh)
		tr->gops->retfunc = trace_graph_thresh_return;
	else
		tr->gops->retfunc = trace_graph_return;

	if (!tracer_flags_is_set(tr, TRACE_GRAPH_PRINT_IRQS))
		ftrace_graph_skip_irqs++;

	if (!tracer_flags_is_set(tr, TRACE_GRAPH_SLEEP_TIME))
		fgraph_no_sleep_time++;

	/* Make gops functions visible before we start tracing */
	smp_mb();

	ret = register_ftrace_graph(tr->gops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static struct tracer graph_trace;

static int ftrace_graph_trace_args(struct trace_array *tr, int set)
{
	trace_func_graph_ent_t entry;

	if (set)
		entry = trace_graph_entry_args;
	else
		entry = trace_graph_entry;

	/* See if there are any changes */
	if (tr->gops->entryfunc == entry)
		return 0;

	unregister_ftrace_graph(tr->gops);

	tr->gops->entryfunc = entry;

	/* Make gops functions visible before we start tracing */
	smp_mb();
	return register_ftrace_graph(tr->gops);
}

static void graph_trace_reset(struct trace_array *tr)
{
	if (!tracer_flags_is_set(tr, TRACE_GRAPH_PRINT_IRQS))
		ftrace_graph_skip_irqs--;
	if (WARN_ON_ONCE(ftrace_graph_skip_irqs < 0))
		ftrace_graph_skip_irqs = 0;

	if (!tracer_flags_is_set(tr, TRACE_GRAPH_SLEEP_TIME))
		fgraph_no_sleep_time--;
	if (WARN_ON_ONCE(fgraph_no_sleep_time < 0))
		fgraph_no_sleep_time = 0;

	tracing_stop_cmdline_record();
	unregister_ftrace_graph(tr->gops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[12];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
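	/* Truncate the comm so "comm-pid" fits in the 14-char proc column */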
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

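/*
 * A "leaf" is a function whose entry event is immediately followed by its
 * matching return event; such a pair is collapsed into a single output line.
 */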
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent.ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			int size = min_t(int, sizeof(data->rent), iter->ent_size);

			memcpy(&data->rent, curr, size);
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* This is a leaf; now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

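	/* Split the ns timestamp into whole seconds; scale the remainder to usecs */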
	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	addr += iter->tr->text_delta;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER(CONTEXT_INFO)) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER(LATENCY_FMT))
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER(CONTEXT_INFO)))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
#else
#define __TRACE_GRAPH_PRINT_RETVAL 0
#endif

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
#define __TRACE_GRAPH_PRINT_RETADDR TRACE_GRAPH_PRINT_RETADDR
static void print_graph_retaddr(struct trace_seq *s, struct fgraph_retaddr_ent_entry *entry,
				u32 trace_flags, bool comment)
{
	if (comment)
		trace_seq_puts(s, " /*");

	trace_seq_puts(s, " <-");
	seq_print_ip_sym_offset(s, entry->graph_rent.retaddr, trace_flags);

	if (comment)
		trace_seq_puts(s, " */");
}
#else
#define __TRACE_GRAPH_PRINT_RETADDR 0
#define print_graph_retaddr(_seq, _entry, _tflags, _comment)		do { } while (0)
#endif

#if defined(CONFIG_FUNCTION_GRAPH_RETVAL) || defined(CONFIG_FUNCTION_GRAPH_RETADDR)

static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entry *entry,
				struct ftrace_graph_ret *graph_ret, void *func,
				u32 opt_flags, u32 trace_flags, int args_size)
{
	unsigned long err_code = 0;
	unsigned long retval = 0;
	bool print_retaddr = false;
	bool print_retval = false;
	bool hex_format = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL_HEX);

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	retval = graph_ret->retval;
	print_retval = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL);
#endif

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	print_retaddr = !!(opt_flags & TRACE_GRAPH_PRINT_RETADDR);
#endif

	if (print_retval && retval && !hex_format) {
		/* Check if the return value matches the negative format */
		if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
			(((u64)retval) >> 32) == 0) {
			err_code = sign_extend64(retval, 31);
		} else {
			err_code = retval;
		}

		if (!IS_ERR_VALUE(err_code))
			err_code = 0;
	}

	if (entry) {
		if (entry->ent.type != TRACE_GRAPH_RETADDR_ENT)
			print_retaddr = false;

		trace_seq_printf(s, "%ps", func);

		if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
			print_function_args(s, entry->args, (unsigned long)func);
			trace_seq_putc(s, ';');
		} else
			trace_seq_puts(s, "();");

		if (print_retval || print_retaddr)
			trace_seq_puts(s, " /*");
	} else {
		print_retaddr = false;
		trace_seq_printf(s, "} /* %ps", func);
	}

	if (print_retaddr)
		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
				    trace_flags, false);

	if (print_retval) {
		if (hex_format || (err_code == 0))
			trace_seq_printf(s, " ret=0x%lx", retval);
		else
			trace_seq_printf(s, " ret=%ld", err_code);
	}

	if (!entry || print_retval || print_retaddr)
		trace_seq_puts(s, " */");
}

#else

#define print_graph_retval(_seq, _ent, _ret, _func, _opt_flags, _trace_flags, args_size) \
	do {} while (0)

#endif

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	unsigned long ret_func;
	int args_size;
	int cpu = iter->cpu;
	int i;

	args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = ret_entry->rettime - ret_entry->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	ret_func = graph_ret->func + iter->tr->text_delta;

	/*
	 * Write out the function return value or return address
	 */
	if (flags & (__TRACE_GRAPH_PRINT_RETVAL | __TRACE_GRAPH_PRINT_RETADDR)) {
		print_graph_retval(s, entry, graph_ret,
				   (void *)graph_ret->func + iter->tr->text_delta,
				   flags, tr->trace_flags, args_size);
	} else {
		trace_seq_printf(s, "%ps", (void *)ret_func);

		if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
			print_function_args(s, FGRAPH_ENTRY_ARGS(entry), ret_func);
			trace_seq_putc(s, ';');
		} else
			trace_seq_puts(s, "();");
	}
	trace_seq_putc(s, '\n');

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	int args_size;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	func = call->func + iter->tr->text_delta;

	trace_seq_printf(s, "%ps", (void *)func);

	args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);

	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
		print_function_args(s, FGRAPH_ENTRY_ARGS(entry), func);
	else
		trace_seq_puts(s, "()");

	trace_seq_puts(s, " {");

	if (flags & __TRACE_GRAPH_PRINT_RETADDR  &&
		entry->ent.type == TRACE_GRAPH_RETADDR_ENT)
		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
			tr->trace_flags, true);
	trace_seq_putc(s, '\n');

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER(CONTEXT_INFO)))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER(LATENCY_FMT))
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	addr += iter->tr->text_delta;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;
	/*
	 * print_graph_entry() may consume the current event,
	 * thus @field may become invalid, so we need to save it.
	 * This function is shared by ftrace_graph_ent_entry and
	 * fgraph_retaddr_ent_entry; the size of the latter is
	 * larger, but it is very small and can be safely saved
	 * on the stack.
	 */
	struct ftrace_graph_ent_entry *entry;
	struct fgraph_retaddr_ent_entry *rentry;
	u8 save_buf[sizeof(*rentry) + FTRACE_REGS_MAX_ARGS * sizeof(long)];

	/* The ent_size is expected to be as big as the entry */
	if (iter->ent_size > sizeof(save_buf))
		iter->ent_size = sizeof(save_buf);

	entry = (void *)save_buf;
	memcpy(entry, field, iter->ent_size);

	call = &entry->graph_ent;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, entry);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, entry, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, entry, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	struct ftrace_graph_ret *trace = &retentry->ret;
	u64 calltime = retentry->calltime;
	u64 rettime = retentry->rettime;
	unsigned long long duration = rettime - calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	func = trace->func + iter->tr->text_delta;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Always write out the function name and its return value if the
	 * funcgraph-retval option is enabled.
	 */
	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
		print_graph_retval(s, NULL, trace, (void *)func, flags,
				   tr->trace_flags, 0);
	} else {
		/*
		 * If the return function does not have a matching entry,
		 * then the entry was lost. Instead of just printing
		 * the '}' and letting the user guess what function this
		 * belongs to, write out the function name. Always do
		 * that if the funcgraph-tail option is enabled.
		 */
		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
			trace_seq_puts(s, "}");
		else
			trace_seq_printf(s, "} /* %ps */", (void *)func);
	}
	trace_seq_putc(s, '\n');

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent.ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter, flags);
	}
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	case TRACE_GRAPH_RETADDR_ENT: {
		/*
		 * ftrace_graph_ent_entry and fgraph_retaddr_ent_entry have
		 * similar functions and memory layouts. The only difference
		 * is that the latter one has an extra retaddr member, so
		 * they can share most of the logic.
		 */
		struct fgraph_retaddr_ent_entry *rfield;

		trace_assign_type(rfield, entry);
		return print_graph_entry((struct ftrace_graph_ent_entry *)rfield,
					  s, iter, flags);
	}
#endif
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(field, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;

	return print_graph_function_flags(iter, tr->current_trace_flags->val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER(LATENCY_FMT);

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	print_graph_headers_flags(s, tr->current_trace_flags->val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER(CONTEXT_INFO)))
		return;

	if (tr->trace_flags & TRACE_ITER(LATENCY_FMT)) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
		iter->private = NULL;
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
/*
 * The function profiler gets updated even if function graph
 * isn't the current tracer. Handle it separately.
 */
#ifdef CONFIG_FUNCTION_PROFILER
	if (bit == TRACE_GRAPH_SLEEP_TIME && (tr->flags & TRACE_ARRAY_FL_GLOBAL) &&
	    !!set == fprofile_no_sleep_time) {
		if (set) {
			fgraph_no_sleep_time--;
			if (WARN_ON_ONCE(fgraph_no_sleep_time < 0))
				fgraph_no_sleep_time = 0;
			fprofile_no_sleep_time = false;
		} else {
			fgraph_no_sleep_time++;
			fprofile_no_sleep_time = true;
		}
	}
#endif

	/* Do nothing if the current tracer is not this tracer */
	if (tr->current_trace != &graph_trace)
		return 0;

	/* Do nothing if already set. */
	if (!!set == !!(tr->current_trace_flags->val & bit))
		return 0;

	switch (bit) {
	case TRACE_GRAPH_SLEEP_TIME:
		if (set) {
			fgraph_no_sleep_time--;
			if (WARN_ON_ONCE(fgraph_no_sleep_time < 0))
				fgraph_no_sleep_time = 0;
		} else {
			fgraph_no_sleep_time++;
		}
		break;

	case TRACE_GRAPH_PRINT_IRQS:
		if (set)
			ftrace_graph_skip_irqs--;
		else
			ftrace_graph_skip_irqs++;
		if (WARN_ON_ONCE(ftrace_graph_skip_irqs < 0))
			ftrace_graph_skip_irqs = 0;
		break;

	case TRACE_GRAPH_ARGS:
		return ftrace_graph_trace_args(tr, set);
	}

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
static struct trace_event graph_trace_retaddr_entry_event = {
	.type		= TRACE_GRAPH_RETADDR_ENT,
	.funcs		= &graph_functions,
};
#endif

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.default_flags	= &tracer_flags,
	.set_flag	= func_graph_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

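/*
 * Backs the <tracefs>/max_graph_depth file: e.g. "echo 3 > max_graph_depth"
 * limits tracing to three call levels, and writing 0 removes the limit.
 */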
static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
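	/* snprintf() with a NULL buffer returns the width needed for the largest CPU id */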
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	if (!register_trace_event(&graph_trace_retaddr_entry_event)) {
		pr_warn("Warning: could not register graph trace retaddr events\n");
		return 1;
	}
#endif

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
1832