xref: /linux/kernel/trace/trace_functions_graph.c (revision 5a0e3ad6af8660be21ca98a971cd00f331318c05)
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data		*cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};

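/*
 * For reference, a typical way to drive these options from user space
 * (assuming debugfs is mounted at /sys/kernel/debug; the option names are
 * the TRACER_OPT strings above, prefixed with "no" to clear them):
 *
 *	cd /sys/kernel/debug/tracing
 *	echo function_graph > current_tracer
 *	echo funcgraph-proc > trace_options
 *	echo nofuncgraph-cpu > trace_options
 *	cat trace
 */
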
static struct trace_array *graph_array;


/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
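
/*
 * For context, ftrace_push_return_trace() is called from each arch's
 * function entry hook. A minimal sketch of that caller, loosely modeled
 * on x86's prepare_ftrace_return() (details and names vary per arch):
 *
 *	void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 *				   unsigned long frame_pointer)
 *	{
 *		struct ftrace_graph_ent trace;
 *		unsigned long old = *parent;
 *
 *		// Hijack the return address so the traced function
 *		// returns into return_to_handler instead.
 *		*parent = (unsigned long)&return_to_handler;
 *
 *		if (ftrace_push_return_trace(old, self_addr, &trace.depth,
 *					     frame_pointer) == -EBUSY) {
 *			*parent = old;
 *			return;
 *		}
 *
 *		trace.func = self_addr;
 *		if (!ftrace_graph_entry(&trace)) {
 *			current->curr_ret_stack--;
 *			*parent = old;
 *		}
 *	}
 */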

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 compiled with -Os (optimize for size) makes
	 * the latest gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
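
/*
 * The function above is reached through return_to_handler, a small
 * arch-provided assembly stub that the entry hook installed as the fake
 * return address. Conceptually it does (a sketch, not any particular arch):
 *
 *	return_to_handler:
 *		save the scratch and return-value registers
 *		call ftrace_return_to_handler	// yields the original address
 *		move the result where the arch expects a jump target
 *		restore the saved registers
 *		jump to the original return address
 */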

static int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function, or is itself enabled. */
	if (!(trace->depth || ftrace_graph_addr(trace->func)))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

/*
 * When a threshold is set, record nothing at function entry: only
 * returns whose duration reaches tracing_thresh get reported.
 */
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

/* Only record the return event when the duration reaches tracing_thresh. */
void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
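
/*
 * Worked example: pid 1755 with comm "sshd" gives "sshd-1755" (9 chars),
 * so spaces = 14 - 9 = 5, printed as 2 leading and 3 trailing spaces to
 * keep the field centered within TRACE_GRAPH_PROCINFO_LENGTH columns.
 */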


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return TRACE_TYPE_PARTIAL_LINE;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = iter->buffer_iter[iter->cpu];

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			data->ret = *next;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
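
/*
 * The effect of this leaf optimization on the output, shown with a
 * made-up excerpt: a call whose return immediately follows its entry is
 * folded into a single "func();" line, anything else opens a bracket:
 *
 *	1)               |  do_sys_open() {
 *	1)   0.249 us    |    getname();
 *	1) + 12.774 us   |  }
 */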

/* Signal an execution-time overhead to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If the duration column is disabled, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non-nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 usecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
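
/*
 * Worked example: t = 1234567890123ns. do_div() leaves t = 1234 seconds
 * with a 567890123ns remainder, so usecs_rem = 567890 and the column
 * reads " 1234.567890 |  ".
 */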

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if we don't have one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print the integer part, in microseconds */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print the nsecs remainder (we don't want to exceed 7 digits) */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
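
/*
 * Worked example: duration = 12345ns. do_div() leaves 12 with
 * nsecs_rem = 345, printed as "12.345 us " plus enough padding to keep
 * the numeric part of the column 7 digits wide.
 */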

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since this
		 * is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * a note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since this is the
		 * return from a function, we now want the comments
		 * to display at the same level as the closing bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_printf(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->trace(iter, sym_flags);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can safely be saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}

static void print_lat_header(struct seq_file *s)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}

static void print_graph_headers(struct seq_file *s)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s);

	/* 1st line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "|||||");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
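
/*
 * With the default flags, the headers and a trace excerpt look roughly
 * like this (illustrative, not captured output):
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 *	 0)               |  sys_read() {
 *	 0)   0.310 us    |    fget_light();
 *	 0) + 11.211 us   |  }
 */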

static void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		*pid = -1;
		*depth = 0;
		*ignore = 0;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

static void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
	/*
	 * snprintf(NULL, 0, ...) returns the number of characters the
	 * largest CPU id needs, e.g. 3 when nr_cpu_ids is 256.
	 */
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
1226