xref: /linux/kernel/trace/trace_functions_graph.c (revision 18c4078489fe064cc0ed08be3381cf2f26657f5f)
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

struct fgraph_data {
	pid_t		last_pid;
	int		depth;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};

/* pid on the last trace processed */

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 optimized for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %pF return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

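/* Register the entry/return callbacks with ftrace and start cmdline recording */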
static int graph_trace_init(struct trace_array *tr)
{
	int ret = register_ftrace_graph(&trace_graph_return,
					&trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

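/* Return the number of decimal digits needed to print @nb (1 to 3) */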
static inline int log10_cpu(int nb)
{
	if (nb / 100)
		return 3;
	if (nb / 10)
		return 2;
	return 1;
}

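/* Print the CPU column, spaced according to the number of online CPUs */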
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int i;
	int ret;
	int log10_this = log10_cpu(cpu);
	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " ");

	/*
	 * Tricky - we space the CPU field according to the max
	 * number of online CPUs. On a 2-cpu system it would take
	 * a maximum of 1 digit - on a 128 cpu system it would
	 * take up to 3 digits:
	 */
	for (i = 0; i < log10_all - log10_this; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	ret = trace_seq_printf(s, "%d) ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

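/* Print "comm-pid", centered in a TRACE_GRAPH_PROCINFO_LENGTH wide column */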
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}


/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;

	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

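/*
 * Peek at the next event: if it is the matching return for @curr (same pid
 * and same function), the current entry is a leaf, so return the paired
 * return entry and advance the iterator; otherwise return NULL.
 */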
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	/* First peek to compare current entry and the next one */
	if (ring_iter) {
		event = ring_buffer_iter_peek(ring_iter, NULL);
	} else {
		/* We need to consume the current entry to see the next one */
		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
					NULL);
	}

	if (!event)
		return NULL;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* This is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If the duration column is disabled, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non-nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 usecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}

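/* Print the absolute timestamp in seconds.microseconds format */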
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

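/*
 * If @addr lies in the irqentry text section, print an irq entry/exit
 * marker line ("==========>" or "<==========") with the usual columns.
 */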
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if we don't have one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

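/* Format a duration given in ns as "<usecs>.<nsecs> us", padded to the column width */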
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print the integer part (usecs) */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		*depth = call->depth - 1;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "();\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

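/* Case of a non-leaf (nested) function on its call entry */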
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		*depth = call->depth;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

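/*
 * Print the common start of a line: the context-switch block if the pid
 * changed, an irq marker when @type is set, then the absolute time, CPU
 * and proc columns according to the current flags.
 */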
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	int cpu = iter->cpu;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		return print_graph_entry_leaf(iter, field, leaf_ret, s);
	else
		return print_graph_entry_nested(iter, field, s, cpu);
}

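/* Print the closing brace (and optional overrun count) for a function return event */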
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int ret;
	int i;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, so we now want the comments
		 * to display at the same level as the bracket.
		 */
		*depth = trace->depth - 1;
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

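/* Print a non-graph event (bprintk, printk or any other entry) as a C-style comment */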
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->trace(iter, sym_flags);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can safely be saved on the stack.
		 */
		struct ftrace_graph_ent_entry *field, saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}

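/* Print the two header lines matching the columns selected by the tracer flags */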
static void print_graph_headers(struct seq_file *s)
{
	/* 1st line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID      ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "|  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  |    |        ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

static void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
	int cpu;

	if (!data)
		pr_warning("function graph tracer: not enough memory\n");
	else
		for_each_possible_cpu(cpu) {
			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
			int *depth = &(per_cpu_ptr(data, cpu)->depth);
			*pid = -1;
			*depth = 0;
		}

	iter->private = data;
}

static void graph_trace_close(struct trace_iterator *iter)
{
	free_percpu(iter->private);
}

static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.close		= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);