xref: /linux/kernel/trace/trace_functions_graph.c (revision 6e59bcc9c8adec9a5bbedfa95a89946c56c510d9)
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	/* Display function return value ? */
	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
	/* Display function return value in hexadecimal format ? */
	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
#endif
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
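
/*
 * Illustrative usage, not part of the original file: each option above is
 * toggled at runtime through the tracefs trace_options file (paths assume
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   # echo function_graph > /sys/kernel/tracing/current_tracer
 *   # echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *   # echo nofuncgraph-overhead > /sys/kernel/tracing/trace_options
 */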

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_hardirq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace,
		      struct fgraph_ops *gops)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int ret;
	int cpu;

	if (*task_var & TRACE_GRAPH_NOTRACE)
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of the ret stack negative to indicate that it
	 * should ignore further functions. But it needs its own ret stack
	 * entry to recover the original index in order to continue tracing
	 * after returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		*task_var |= TRACE_GRAPH_NOTRACE_BIT;
		/*
		 * Need to return 1 so that the return handler is called,
		 * which will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		ret = __trace_graph_entry(tr, trace, trace_ctx);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned int trace_ctx)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, trace_ctx);
	__trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned int trace_ctx)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace,
			struct fgraph_ops *gops)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int cpu;

	ftrace_graph_addr_finish(gops, trace);

	if (*task_var & TRACE_GRAPH_NOTRACE) {
		*task_var &= ~TRACE_GRAPH_NOTRACE;
		return;
	}

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		__trace_graph_return(tr, trace, trace_ctx);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
				      struct fgraph_ops *gops)
{
	ftrace_graph_addr_finish(gops, trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace, gops);
}
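
/*
 * Illustrative usage, not part of the original file: with a threshold set,
 * entry events are skipped (trace_graph_entry() returns 1 without writing
 * to the ring buffer) and trace_graph_thresh_return() records only returns
 * of functions that ran longer than the threshold:
 *
 *   # echo 100 > /sys/kernel/tracing/tracing_thresh    (in microseconds)
 *   # echo function_graph > /sys/kernel/tracing/current_tracer
 */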

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	struct fgraph_ops *gops;

	gops = kzalloc(sizeof(*gops), GFP_KERNEL);
	if (!gops)
		return -ENOMEM;

	gops->entryfunc = &trace_graph_entry;
	gops->retfunc = &trace_graph_return;

	tr->gops = gops;
	gops->private = tr;

	fgraph_init_ops(&gops->ops, ops);

	return 0;
}

void free_fgraph_ops(struct trace_array *tr)
{
	kfree(tr->gops);
}

__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	tr->gops = &funcgraph_ops;
	funcgraph_ops.private = tr;
	fgraph_init_ops(&tr->gops->ops, ops);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	tr->gops->entryfunc = trace_graph_entry;

	if (tracing_thresh)
		tr->gops->retfunc = trace_graph_thresh_return;
	else
		tr->gops->retfunc = trace_graph_return;

	/* Make sure gops functions are visible before we start tracing */
	smp_mb();

	ret = register_ftrace_graph(tr->gops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(tr->gops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
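
/*
 * Illustrative example: for pid 1755 with comm "sshd", print_graph_proc()
 * emits "sshd-1755" centered in a 14-character column. Since comm is
 * truncated to 7 characters, a task named "kworker/u8:1" would show up
 * as "kworker-<pid>".
 */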

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}
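
/*
 * Illustrative effect of the leaf optimization above: an entry event that
 * is immediately followed by its matching return event (same pid, same
 * function) is rendered as a single line, e.g.:
 *
 *   1)   0.333 us    |  kfree();
 *
 * instead of:
 *
 *   1)               |  kfree() {
 *   1)   0.333 us    |  }
 */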

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	addr += iter->tr->text_delta;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
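
/*
 * Illustrative example of the markers emitted above when an interrupt
 * is entered and exited (exact layout depends on the enabled options):
 *
 *   0)   ==========> |
 *   0)               |    smp_apic_timer_interrupt() {
 *   ...
 *   0)   <========== |
 */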

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal the overhead of the execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
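
/*
 * Illustrative example: a duration of 3141 ns prints as "3.141 us", and
 * with funcgraph-overhead enabled trace_find_mark() prefixes the column
 * with a mark for slow functions: '+' over 10 us, '!' over 100 us,
 * '#' over 1000 us, '*' over 10 ms, '@' over 100 ms and '$' over 1 s,
 * e.g. "+ 23.432 us" for a function that exceeded 10 us.
 */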

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL

#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL

static void print_graph_retval(struct trace_seq *s, unsigned long retval,
				bool leaf, void *func, bool hex_format)
{
	unsigned long err_code = 0;

	if (retval == 0 || hex_format)
		goto done;

	/* Check if the return value matches the negative format */
	if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
		(((u64)retval) >> 32) == 0) {
		/* sign extension */
		err_code = (unsigned long)(s32)retval;
	} else {
		err_code = retval;
	}

	if (!IS_ERR_VALUE(err_code))
		err_code = 0;

done:
	if (leaf) {
		if (hex_format || (err_code == 0))
			trace_seq_printf(s, "%ps(); /* = 0x%lx */\n",
					func, retval);
		else
			trace_seq_printf(s, "%ps(); /* = %ld */\n",
					func, err_code);
	} else {
		if (hex_format || (err_code == 0))
			trace_seq_printf(s, "} /* %ps = 0x%lx */\n",
					func, retval);
		else
			trace_seq_printf(s, "} /* %ps = %ld */\n",
					func, err_code);
	}
}

#else

#define __TRACE_GRAPH_PRINT_RETVAL 0

#define print_graph_retval(_seq, _retval, _leaf, _func, _format) do {} while (0)

#endif
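
/*
 * Illustrative output with funcgraph-retval enabled (the tracer wraps the
 * value in C-comment markers, omitted here to keep this block a valid
 * comment): a leaf gets "= 0x0" appended to its call line, a non-leaf gets
 * e.g. "alloc_bprm = -12" on its closing brace. Values in the errno range
 * print as signed decimal; everything else (and everything, with
 * funcgraph-retval-hex) prints in hex.
 */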

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	unsigned long func;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	func = call->func + iter->tr->text_delta;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Write out the function return value if the funcgraph-retval
	 * option is enabled.
	 */
	if (flags & __TRACE_GRAPH_PRINT_RETVAL)
		print_graph_retval(s, graph_ret->retval, true, (void *)func,
				!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
	else
		trace_seq_printf(s, "%ps();\n", (void *)func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	func = call->func + iter->tr->text_delta;

	trace_seq_printf(s, "%ps() {\n", (void *)func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
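
/*
 * Illustrative rendering of the two entry forms above: a nested entry
 * opens a brace with no duration (the duration is printed when the
 * matching return event is processed), while a leaf prints complete with
 * its duration:
 *
 *   0)               |  do_sys_open() {
 *   0)   1.015 us    |    getname();
 *   0) + 12.208 us   |  }
 */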

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are displayed)
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	addr += iter->tr->text_delta;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are displayed)
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return of the
	 * entry function. Let's not trace it and clear the entry depth,
	 * since we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make a
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	func = trace->func + iter->tr->text_delta;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. This is the
		 * return from a function, so we now want the comments
		 * to display at the same level as the closing bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Always write out the function name and its return value if the
	 * funcgraph-retval option is enabled.
	 */
	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
		print_graph_retval(s, trace->retval, false, (void *)func,
			!!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
	} else {
		/*
		 * If the return function does not have a matching entry,
		 * then the entry was lost. Instead of just printing
		 * the '}' and letting the user guess what function this
		 * belongs to, write out the function name. Always do
		 * that if the funcgraph-tail option is enabled.
		 */
		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
			trace_seq_puts(s, "}\n");
		else
			trace_seq_printf(s, "} /* %ps */\n", (void *)func);
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}
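
/*
 * Illustrative effect of the comment path above: a trace_printk() issued
 * from a traced function is printed between C-comment markers, indented
 * one level deeper than the enclosing function.
 */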

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would otherwise never
	 * be printed.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event
		 * (thus @field may become invalid), so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}
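
/*
 * Illustrative example of the default header emitted above (funcgraph-cpu,
 * funcgraph-overhead and funcgraph-duration set, latency format off):
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */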

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
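
/*
 * Illustrative usage, not part of the original file: the tracer registered
 * above is selected and read through tracefs:
 *
 *   # echo function_graph > /sys/kernel/tracing/current_tracer
 *   # cat /sys/kernel/tracing/trace
 */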

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
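
/*
 * Illustrative usage, not part of the original file: limit tracing to the
 * first two call levels (0 means no limit):
 *
 *   # echo 2 > /sys/kernel/tracing/max_graph_depth
 */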

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);