xref: /linux/kernel/trace/trace_functions_graph.c (revision 41dc27e3b9bd41b900f5aea06f86669e54a2cdd6)
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};
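
/*
 * Usage sketch (illustrative, not part of the original file): with the
 * function_graph tracer active, the per-tracer options above can
 * typically be toggled at run time through the tracing trace_options
 * file, e.g.:
 *
 *	echo funcgraph-proc   > /sys/kernel/debug/tracing/trace_options
 *	echo nofuncgraph-irqs > /sys/kernel/debug/tracing/trace_options
 *
 * The core tracer code flips the corresponding bit in tracer_flags.val
 * and calls func_graph_set_flag() (defined below) for side effects such
 * as updating ftrace_graph_skip_irqs.
 */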

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
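
/*
 * For illustration, print_graph_duration() (below) fills the column as:
 *   FLAGS_FILL_FULL:  "              |  "  (the whole column is padding)
 *   FLAGS_FILL_START: "  "  (printed before an "==========>"/"<==========" irq marker)
 *   FLAGS_FILL_END:   " |"  (printed after such a marker)
 */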

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used.  To support filtering out
	 * specific functions, the index is made negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
	 * negative index it will ignore the record.  The index is recovered
	 * when returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, and recording then continues normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented in
	 * this function.  So it can be less than -1 only if it was filtered
	 * out via ftrace_graph_notrace_addr(), which can be set from the
	 * set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
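
/*
 * Worked example of the notrace index trick above (illustrative only):
 * with curr_ret_stack == 2, entering a function matched by
 * set_graph_notrace stores its frame at index 3 but leaves
 * curr_ret_stack at 3 - FTRACE_NOTRACE_DEPTH, i.e. deeply negative.
 * Every nested entry then bails out early on the "< -1" check above,
 * so nothing inside the filtered function is recorded.  On return,
 * ftrace_return_to_handler() adds FTRACE_NOTRACE_DEPTH back and
 * tracing resumes with curr_ret_stack == 2.
 */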

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function, or is itself enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	/* Truncate the comm so "comm-pid" fits in the proc column */
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
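
/*
 * Illustrative output of print_graph_proc(): pid 1755 with comm "sshd"
 * gives "sshd-1755" (9 chars), centered in the 14-char proc field as
 * "  sshd-1755   " (2 leading and 3 trailing spaces).
 */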

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return TRACE_TYPE_PARTIAL_LINE;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, we only
			 * need to save its type (enough to know it is not
			 * a matching return). Otherwise we can safely copy
			 * the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* This is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
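
/*
 * To illustrate the leaf test above: if the entry event for func() is
 * immediately followed by its own return event (same pid, same func),
 * the pair is printed on one line as "func();".  Otherwise the entry
 * is printed as "func() {" and the matching "}" comes later.  See
 * print_graph_entry_leaf() and print_graph_entry_nested() below.
 */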

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	/* The duration comes in as nanoseconds; split off the sub-usec part */
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", usecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
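
/*
 * Example: a duration of 1234567 ns is printed by the function above as
 * "1234.567 us " - the integer microseconds, a dot, then the nanosecond
 * remainder, with the total number of digits kept to at most 7 before
 * space padding.
 */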

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an execution time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return of the
	 * entry we recorded. Let's not trace it and clear the entry
	 * depth, since we are now out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can safely be saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* Don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* Print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
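
/*
 * Typical usage (illustrative): the tracer registered above is enabled
 * through the tracing debugfs directory, e.g.:
 *
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 *
 * The exact mount point may vary; /sys/kernel/debug is the common one.
 */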

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
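
/*
 * Example (illustrative): limit the trace to two levels of nesting by
 * writing to the max_graph_depth file created below:
 *
 *	echo 2 > /sys/kernel/debug/tracing/max_graph_depth
 *
 * Writing 0 removes the limit; trace_graph_entry() compares each
 * entry's depth against max_depth before recording it.
 */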

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);