1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 *
4 * Function graph tracer.
5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
6 * Mostly borrowed from function tracer which
7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
8 *
9 */
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/fs.h>
15
16 #include "trace.h"
17 #include "trace_output.h"
18
19 /* When set, irq functions will be ignored */
20 static int ftrace_graph_skip_irqs;
21
22 struct fgraph_cpu_data {
23 pid_t last_pid;
24 int depth;
25 int depth_irq;
26 int ignore;
27 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
28 };
29
30 struct fgraph_ent_args {
31 struct ftrace_graph_ent_entry ent;
32 /* Force args[] to be sized for FTRACE_REGS_MAX_ARGS entries */
33 unsigned long args[FTRACE_REGS_MAX_ARGS];
34 };
35
36 struct fgraph_data {
37 struct fgraph_cpu_data __percpu *cpu_data;
38
39 /* Place to preserve last processed entry. */
40 union {
41 struct fgraph_ent_args ent;
42 /* TODO allow retaddr to have args */
43 struct fgraph_retaddr_ent_entry rent;
44 };
45 struct ftrace_graph_ret_entry ret;
46 int failed;
47 int cpu;
48 };
49
50 #define TRACE_GRAPH_INDENT 2
51
52 unsigned int fgraph_max_depth;
53
54 static struct tracer_opt trace_opts[] = {
55 /* Display overruns? (for self-debugging purposes) */
56 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
57 /* Display CPU ? */
58 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
59 /* Display Overhead ? */
60 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
61 /* Display proc name/pid */
62 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
63 /* Display duration of execution */
64 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
65 /* Display absolute time of an entry */
66 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
67 /* Display interrupts */
68 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
69 /* Display function name after trailing } */
70 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
71 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
72 /* Display function return value ? */
73 { TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
74 /* Display function return value in hexadecimal format ? */
75 { TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
76 #endif
77 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
78 /* Display function return address ? */
79 { TRACER_OPT(funcgraph-retaddr, TRACE_GRAPH_PRINT_RETADDR) },
80 #endif
81 #ifdef CONFIG_FUNCTION_TRACE_ARGS
82 /* Display function arguments ? */
83 { TRACER_OPT(funcgraph-args, TRACE_GRAPH_ARGS) },
84 #endif
85 /* Include sleep time (scheduled out) between entry and return */
86 { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
87
88 #ifdef CONFIG_FUNCTION_PROFILER
89 /* Include time within nested functions */
90 { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
91 #endif
92
93 { } /* Empty entry */
94 };
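/*
 * Illustrative usage (not from the original source): these options are
 * toggled at run time through the trace_options file in tracefs, e.g.
 *
 *   # cd /sys/kernel/tracing
 *   # echo funcgraph-proc > trace_options        # enable an option
 *   # echo nofuncgraph-overhead > trace_options  # disable an option
 */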
95
96 static struct tracer_flags tracer_flags = {
97 /* Don't display overruns, proc, or tail by default */
98 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
99 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
100 TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
101 .opts = trace_opts
102 };
103
104 static bool tracer_flags_is_set(u32 flags)
105 {
106 return (tracer_flags.val & flags) == flags;
107 }
108
109 /*
110 * The DURATION column is also used to display IRQ markers.
111 * The following values are used by print_graph_irq() and others
112 * to fill in space in the DURATION column.
113 */
114 enum {
115 FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
116 FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
117 FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
118 };
119
120 static void
121 print_graph_duration(struct trace_array *tr, unsigned long long duration,
122 struct trace_seq *s, u32 flags);
123
124 static int __graph_entry(struct trace_array *tr, struct ftrace_graph_ent *trace,
125 unsigned int trace_ctx, struct ftrace_regs *fregs)
126 {
127 struct ring_buffer_event *event;
128 struct trace_buffer *buffer = tr->array_buffer.buffer;
129 struct ftrace_graph_ent_entry *entry;
130 int size;
131
132 /* If fregs is set, reserve room for FTRACE_REGS_MAX_ARGS long-sized words */
133 size = sizeof(*entry) + (FTRACE_REGS_MAX_ARGS * !!fregs * sizeof(long));
134
135 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, size, trace_ctx);
136 if (!event)
137 return 0;
138
139 entry = ring_buffer_event_data(event);
140 entry->graph_ent = *trace;
141
142 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
143 if (fregs) {
144 for (int i = 0; i < FTRACE_REGS_MAX_ARGS; i++)
145 entry->args[i] = ftrace_regs_get_argument(fregs, i);
146 }
147 #endif
148
149 trace_buffer_unlock_commit_nostack(buffer, event);
150
151 return 1;
152 }
153
154 int __trace_graph_entry(struct trace_array *tr,
155 struct ftrace_graph_ent *trace,
156 unsigned int trace_ctx)
157 {
158 return __graph_entry(tr, trace, trace_ctx, NULL);
159 }
160
161 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
162 int __trace_graph_retaddr_entry(struct trace_array *tr,
163 struct ftrace_graph_ent *trace,
164 unsigned int trace_ctx,
165 unsigned long retaddr)
166 {
167 struct ring_buffer_event *event;
168 struct trace_buffer *buffer = tr->array_buffer.buffer;
169 struct fgraph_retaddr_ent_entry *entry;
170
171 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RETADDR_ENT,
172 sizeof(*entry), trace_ctx);
173 if (!event)
174 return 0;
175 entry = ring_buffer_event_data(event);
176 entry->graph_ent.func = trace->func;
177 entry->graph_ent.depth = trace->depth;
178 entry->graph_ent.retaddr = retaddr;
179 trace_buffer_unlock_commit_nostack(buffer, event);
180
181 return 1;
182 }
183 #else
184 int __trace_graph_retaddr_entry(struct trace_array *tr,
185 struct ftrace_graph_ent *trace,
186 unsigned int trace_ctx,
187 unsigned long retaddr)
188 {
189 return 1;
190 }
191 #endif
192
193 static inline int ftrace_graph_ignore_irqs(void)
194 {
195 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
196 return 0;
197
198 return in_hardirq();
199 }
200
201 struct fgraph_times {
202 unsigned long long calltime;
203 unsigned long long sleeptime; /* may be optional! */
204 };
205
206 static int graph_entry(struct ftrace_graph_ent *trace,
207 struct fgraph_ops *gops,
208 struct ftrace_regs *fregs)
209 {
210 unsigned long *task_var = fgraph_get_task_var(gops);
211 struct trace_array *tr = gops->private;
212 struct fgraph_times *ftimes;
213 unsigned int trace_ctx;
214 int ret = 0;
215
216 if (*task_var & TRACE_GRAPH_NOTRACE)
217 return 0;
218
219 /*
220 * Do not trace a function if it's filtered by set_graph_notrace.
221 * Make the index of ret stack negative to indicate that it should
222 * ignore further functions. But it needs its own ret stack entry
223 * to recover the original index in order to continue tracing after
224 * returning from the function.
225 */
226 if (ftrace_graph_notrace_addr(trace->func)) {
227 *task_var |= TRACE_GRAPH_NOTRACE;
228 /*
229 * Need to return 1 so that the return handler is called,
230 * which will clear the NOTRACE bit.
231 */
232 return 1;
233 }
234
235 if (!ftrace_trace_task(tr))
236 return 0;
237
238 if (ftrace_graph_ignore_func(gops, trace))
239 return 0;
240
241 if (ftrace_graph_ignore_irqs())
242 return 0;
243
244 if (fgraph_sleep_time) {
245 /* Only need to record the calltime */
246 ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
247 } else {
248 ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
249 if (ftimes)
250 ftimes->sleeptime = current->ftrace_sleeptime;
251 }
252 if (!ftimes)
253 return 0;
254
255 ftimes->calltime = trace_clock_local();
256
257 /*
258 * Stop here if tracing_threshold is set. We only write function return
259 * events to the ring buffer.
260 */
261 if (tracing_thresh)
262 return 1;
263
264 trace_ctx = tracing_gen_ctx();
265 if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
266 tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
267 unsigned long retaddr = ftrace_graph_top_ret_addr(current);
268 ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
269 } else {
270 ret = __graph_entry(tr, trace, trace_ctx, fregs);
271 }
272
273 return ret;
274 }
275
276 int trace_graph_entry(struct ftrace_graph_ent *trace,
277 struct fgraph_ops *gops,
278 struct ftrace_regs *fregs)
279 {
280 return graph_entry(trace, gops, NULL);
281 }
282
283 static int trace_graph_entry_args(struct ftrace_graph_ent *trace,
284 struct fgraph_ops *gops,
285 struct ftrace_regs *fregs)
286 {
287 return graph_entry(trace, gops, fregs);
288 }
289
290 static void
291 __trace_graph_function(struct trace_array *tr,
292 unsigned long ip, unsigned int trace_ctx)
293 {
294 u64 time = trace_clock_local();
295 struct ftrace_graph_ent ent = {
296 .func = ip,
297 .depth = 0,
298 };
299 struct ftrace_graph_ret ret = {
300 .func = ip,
301 .depth = 0,
302 };
303
304 __trace_graph_entry(tr, &ent, trace_ctx);
305 __trace_graph_return(tr, &ret, trace_ctx, time, time);
306 }
307
308 void
309 trace_graph_function(struct trace_array *tr,
310 unsigned long ip, unsigned long parent_ip,
311 unsigned int trace_ctx)
312 {
313 __trace_graph_function(tr, ip, trace_ctx);
314 }
315
316 void __trace_graph_return(struct trace_array *tr,
317 struct ftrace_graph_ret *trace,
318 unsigned int trace_ctx,
319 u64 calltime, u64 rettime)
320 {
321 struct ring_buffer_event *event;
322 struct trace_buffer *buffer = tr->array_buffer.buffer;
323 struct ftrace_graph_ret_entry *entry;
324
325 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
326 sizeof(*entry), trace_ctx);
327 if (!event)
328 return;
329 entry = ring_buffer_event_data(event);
330 entry->ret = *trace;
331 entry->calltime = calltime;
332 entry->rettime = rettime;
333 trace_buffer_unlock_commit_nostack(buffer, event);
334 }
335
336 static void handle_nosleeptime(struct ftrace_graph_ret *trace,
337 struct fgraph_times *ftimes,
338 int size)
339 {
340 if (fgraph_sleep_time || size < sizeof(*ftimes))
341 return;
342
343 ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
344 }
345
346 void trace_graph_return(struct ftrace_graph_ret *trace,
347 struct fgraph_ops *gops, struct ftrace_regs *fregs)
348 {
349 unsigned long *task_var = fgraph_get_task_var(gops);
350 struct trace_array *tr = gops->private;
351 struct fgraph_times *ftimes;
352 unsigned int trace_ctx;
353 u64 calltime, rettime;
354 int size;
355
356 rettime = trace_clock_local();
357
358 ftrace_graph_addr_finish(gops, trace);
359
360 if (*task_var & TRACE_GRAPH_NOTRACE) {
361 *task_var &= ~TRACE_GRAPH_NOTRACE;
362 return;
363 }
364
365 ftimes = fgraph_retrieve_data(gops->idx, &size);
366 if (!ftimes)
367 return;
368
369 handle_nosleeptime(trace, ftimes, size);
370
371 calltime = ftimes->calltime;
372
373 trace_ctx = tracing_gen_ctx();
374 __trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
375 }
376
377 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
378 struct fgraph_ops *gops,
379 struct ftrace_regs *fregs)
380 {
381 struct fgraph_times *ftimes;
382 int size;
383
384 ftrace_graph_addr_finish(gops, trace);
385
386 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
387 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
388 return;
389 }
390
391 ftimes = fgraph_retrieve_data(gops->idx, &size);
392 if (!ftimes)
393 return;
394
395 handle_nosleeptime(trace, ftimes, size);
396
397 if (tracing_thresh &&
398 (trace_clock_local() - ftimes->calltime < tracing_thresh))
399 return;
400 else
401 trace_graph_return(trace, gops, fregs);
402 }
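/*
 * Illustrative usage (assumption: the standard tracefs files): tracing_thresh
 * holds a duration in microseconds; when it is non-zero only returns of
 * functions that ran at least that long are recorded, e.g.
 *
 *   # echo 100 > /sys/kernel/tracing/tracing_thresh
 *   # echo function_graph > /sys/kernel/tracing/current_tracer
 */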
403
404 static struct fgraph_ops funcgraph_ops = {
405 .entryfunc = &trace_graph_entry,
406 .retfunc = &trace_graph_return,
407 };
408
409 int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
410 {
411 struct fgraph_ops *gops;
412
413 gops = kzalloc(sizeof(*gops), GFP_KERNEL);
414 if (!gops)
415 return -ENOMEM;
416
417 gops->entryfunc = &trace_graph_entry;
418 gops->retfunc = &trace_graph_return;
419
420 tr->gops = gops;
421 gops->private = tr;
422
423 fgraph_init_ops(&gops->ops, ops);
424
425 return 0;
426 }
427
428 void free_fgraph_ops(struct trace_array *tr)
429 {
430 kfree(tr->gops);
431 }
432
433 __init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
434 {
435 tr->gops = &funcgraph_ops;
436 funcgraph_ops.private = tr;
437 fgraph_init_ops(&tr->gops->ops, ops);
438 }
439
440 static int graph_trace_init(struct trace_array *tr)
441 {
442 int ret;
443
444 if (tracer_flags_is_set(TRACE_GRAPH_ARGS))
445 tr->gops->entryfunc = trace_graph_entry_args;
446 else
447 tr->gops->entryfunc = trace_graph_entry;
448
449 if (tracing_thresh)
450 tr->gops->retfunc = trace_graph_thresh_return;
451 else
452 tr->gops->retfunc = trace_graph_return;
453
454 /* Make gops functions visible before we start tracing */
455 smp_mb();
456
457 ret = register_ftrace_graph(tr->gops);
458 if (ret)
459 return ret;
460 tracing_start_cmdline_record();
461
462 return 0;
463 }
464
465 static struct tracer graph_trace;
466
467 static int ftrace_graph_trace_args(struct trace_array *tr, int set)
468 {
469 trace_func_graph_ent_t entry;
470
471 /* Do nothing if the current tracer is not this tracer */
472 if (tr->current_trace != &graph_trace)
473 return 0;
474
475 if (set)
476 entry = trace_graph_entry_args;
477 else
478 entry = trace_graph_entry;
479
480 /* See if there are any changes */
481 if (tr->gops->entryfunc == entry)
482 return 0;
483
484 unregister_ftrace_graph(tr->gops);
485
486 tr->gops->entryfunc = entry;
487
488 /* Make gops functions visible before we start tracing */
489 smp_mb();
490 return register_ftrace_graph(tr->gops);
491 }
492
493 static void graph_trace_reset(struct trace_array *tr)
494 {
495 tracing_stop_cmdline_record();
496 unregister_ftrace_graph(tr->gops);
497 }
498
499 static int graph_trace_update_thresh(struct trace_array *tr)
500 {
501 graph_trace_reset(tr);
502 return graph_trace_init(tr);
503 }
504
505 static int max_bytes_for_cpu;
506
507 static void print_graph_cpu(struct trace_seq *s, int cpu)
508 {
509 /*
510 * Start with a space character - to make it stand out
511 * to the right a bit when trace output is pasted into
512 * email:
513 */
514 trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
515 }
516
517 #define TRACE_GRAPH_PROCINFO_LENGTH 14
518
519 static void print_graph_proc(struct trace_seq *s, pid_t pid)
520 {
521 char comm[TASK_COMM_LEN];
522 /* sign + log10(MAX_INT) + '\0' */
523 char pid_str[12];
524 int spaces = 0;
525 int len;
526 int i;
527
528 trace_find_cmdline(pid, comm);
529 comm[7] = '\0';
530 sprintf(pid_str, "%d", pid);
531
532 /* 1 stands for the "-" character */
533 len = strlen(comm) + strlen(pid_str) + 1;
534
535 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
536 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
537
538 /* First spaces to align center */
539 for (i = 0; i < spaces / 2; i++)
540 trace_seq_putc(s, ' ');
541
542 trace_seq_printf(s, "%s-%s", comm, pid_str);
543
544 /* Last spaces to align center */
545 for (i = 0; i < spaces - (spaces / 2); i++)
546 trace_seq_putc(s, ' ');
547 }
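/*
 * Example (illustrative): for pid 1755 running "sshd", the output above is
 * "sshd-1755" centered in a TRACE_GRAPH_PROCINFO_LENGTH (14) column:
 *
 *   "  sshd-1755   "
 *
 * (comm is truncated to 7 characters before formatting).
 */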
548
549
550 static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
551 {
552 trace_seq_putc(s, ' ');
553 trace_print_lat_fmt(s, entry);
554 trace_seq_puts(s, " | ");
555 }
556
557 /* If the pid changed since the last trace, output this event */
558 static void
559 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
560 {
561 pid_t prev_pid;
562 pid_t *last_pid;
563
564 if (!data)
565 return;
566
567 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
568
569 if (*last_pid == pid)
570 return;
571
572 prev_pid = *last_pid;
573 *last_pid = pid;
574
575 if (prev_pid == -1)
576 return;
577 /*
578 * Context-switch trace line:
579
580 ------------------------------------------
581 | 1) migration/0--1 => sshd-1755
582 ------------------------------------------
583
584 */
585 trace_seq_puts(s, " ------------------------------------------\n");
586 print_graph_cpu(s, cpu);
587 print_graph_proc(s, prev_pid);
588 trace_seq_puts(s, " => ");
589 print_graph_proc(s, pid);
590 trace_seq_puts(s, "\n ------------------------------------------\n\n");
591 }
592
593 static struct ftrace_graph_ret_entry *
594 get_return_for_leaf(struct trace_iterator *iter,
595 struct ftrace_graph_ent_entry *curr)
596 {
597 struct fgraph_data *data = iter->private;
598 struct ring_buffer_iter *ring_iter = NULL;
599 struct ring_buffer_event *event;
600 struct ftrace_graph_ret_entry *next;
601
602 /*
603 * If the previous output failed to write to the seq buffer,
604 * then we just reuse the data from before.
605 */
606 if (data && data->failed) {
607 curr = &data->ent.ent;
608 next = &data->ret;
609 } else {
610
611 ring_iter = trace_buffer_iter(iter, iter->cpu);
612
613 /* First peek to compare current entry and the next one */
614 if (ring_iter)
615 event = ring_buffer_iter_peek(ring_iter, NULL);
616 else {
617 /*
618 * We need to consume the current entry to see
619 * the next one.
620 */
621 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
622 NULL, NULL);
623 event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
624 NULL, NULL);
625 }
626
627 if (!event)
628 return NULL;
629
630 next = ring_buffer_event_data(event);
631
632 if (data) {
633 /*
634 * Save current and next entries for later reference
635 * if the output fails.
636 */
637 if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT)) {
638 data->rent = *(struct fgraph_retaddr_ent_entry *)curr;
639 } else {
640 int size = min((int)sizeof(data->ent), (int)iter->ent_size);
641
642 memcpy(&data->ent, curr, size);
643 }
644 /*
645 * If the next event is not a return type, then
646 * we only care about what type it is. Otherwise we can
647 * safely copy the entire event.
648 */
649 if (next->ent.type == TRACE_GRAPH_RET)
650 data->ret = *next;
651 else
652 data->ret.ent.type = next->ent.type;
653 }
654 }
655
656 if (next->ent.type != TRACE_GRAPH_RET)
657 return NULL;
658
659 if (curr->ent.pid != next->ent.pid ||
660 curr->graph_ent.func != next->ret.func)
661 return NULL;
662
663 /* this is a leaf, now advance the iterator */
664 if (ring_iter)
665 ring_buffer_iter_advance(ring_iter);
666
667 return next;
668 }
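/*
 * Example (illustrative): when an entry event is immediately followed by its
 * matching return event, the pair is printed as a single leaf line such as
 *
 *   1)   0.633 us  |    fput();
 *
 * otherwise the entry is printed as "fput() {" and closed later by "}".
 */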
669
670 static void print_graph_abs_time(u64 t, struct trace_seq *s)
671 {
672 unsigned long usecs_rem;
673
674 usecs_rem = do_div(t, NSEC_PER_SEC);
675 usecs_rem /= 1000;
676
677 trace_seq_printf(s, "%5lu.%06lu | ",
678 (unsigned long)t, usecs_rem);
679 }
680
681 static void
682 print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
683 {
684 unsigned long long usecs;
685
686 usecs = iter->ts - iter->array_buffer->time_start;
687 do_div(usecs, NSEC_PER_USEC);
688
689 trace_seq_printf(s, "%9llu us | ", usecs);
690 }
691
692 static void
693 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
694 enum trace_type type, int cpu, pid_t pid, u32 flags)
695 {
696 struct trace_array *tr = iter->tr;
697 struct trace_seq *s = &iter->seq;
698 struct trace_entry *ent = iter->ent;
699
700 addr += iter->tr->text_delta;
701
702 if (addr < (unsigned long)__irqentry_text_start ||
703 addr >= (unsigned long)__irqentry_text_end)
704 return;
705
706 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
707 /* Absolute time */
708 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
709 print_graph_abs_time(iter->ts, s);
710
711 /* Relative time */
712 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
713 print_graph_rel_time(iter, s);
714
715 /* Cpu */
716 if (flags & TRACE_GRAPH_PRINT_CPU)
717 print_graph_cpu(s, cpu);
718
719 /* Proc */
720 if (flags & TRACE_GRAPH_PRINT_PROC) {
721 print_graph_proc(s, pid);
722 trace_seq_puts(s, " | ");
723 }
724
725 /* Latency format */
726 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
727 print_graph_lat_fmt(s, ent);
728 }
729
730 /* No overhead */
731 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
732
733 if (type == TRACE_GRAPH_ENT)
734 trace_seq_puts(s, "==========>");
735 else
736 trace_seq_puts(s, "<==========");
737
738 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
739 trace_seq_putc(s, '\n');
740 }
741
742 void
743 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
744 {
745 unsigned long nsecs_rem = do_div(duration, 1000);
746 /* log10(ULONG_MAX) + '\0' */
747 char usecs_str[21];
748 char nsecs_str[5];
749 int len;
750 int i;
751
752 sprintf(usecs_str, "%lu", (unsigned long) duration);
753
754 /* Print msecs */
755 trace_seq_printf(s, "%s", usecs_str);
756
757 len = strlen(usecs_str);
758
759 /* Print nsecs (we don't want to exceed 7 digits) */
760 if (len < 7) {
761 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
762
763 snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
764 trace_seq_printf(s, ".%s", nsecs_str);
765 len += strlen(nsecs_str) + 1;
766 }
767
768 trace_seq_puts(s, " us ");
769
770 /* Print remaining spaces to fit the row's width */
771 for (i = len; i < 8; i++)
772 trace_seq_putc(s, ' ');
773 }
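/*
 * Example (illustrative): a duration of 3476 ns is printed as "3.476 us"
 * padded to a 12-character column, while 12345678 ns prints as
 * "12345.67 us" (the nanosecond part is truncated so the numeric part
 * stays within roughly 8 characters).
 */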
774
775 static void
776 print_graph_duration(struct trace_array *tr, unsigned long long duration,
777 struct trace_seq *s, u32 flags)
778 {
779 if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
780 !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
781 return;
782
783 /* No real data, just filling the column with spaces */
784 switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
785 case FLAGS_FILL_FULL:
786 trace_seq_puts(s, " | ");
787 return;
788 case FLAGS_FILL_START:
789 trace_seq_puts(s, " ");
790 return;
791 case FLAGS_FILL_END:
792 trace_seq_puts(s, " |");
793 return;
794 }
795
796 /* Signal an overhead of execution time in the output */
797 if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
798 trace_seq_printf(s, "%c ", trace_find_mark(duration));
799 else
800 trace_seq_puts(s, " ");
801
802 trace_print_graph_duration(duration, s);
803 trace_seq_puts(s, "| ");
804 }
805
806 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
807 #define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
808 #else
809 #define __TRACE_GRAPH_PRINT_RETVAL 0
810 #endif
811
812 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
813 #define __TRACE_GRAPH_PRINT_RETADDR TRACE_GRAPH_PRINT_RETADDR
814 static void print_graph_retaddr(struct trace_seq *s, struct fgraph_retaddr_ent_entry *entry,
815 u32 trace_flags, bool comment)
816 {
817 if (comment)
818 trace_seq_puts(s, " /*");
819
820 trace_seq_puts(s, " <-");
821 seq_print_ip_sym(s, entry->graph_ent.retaddr, trace_flags | TRACE_ITER_SYM_OFFSET);
822
823 if (comment)
824 trace_seq_puts(s, " */");
825 }
826 #else
827 #define __TRACE_GRAPH_PRINT_RETADDR 0
828 #define print_graph_retaddr(_seq, _entry, _tflags, _comment) do { } while (0)
829 #endif
830
831 #if defined(CONFIG_FUNCTION_GRAPH_RETVAL) || defined(CONFIG_FUNCTION_GRAPH_RETADDR)
832
833 static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entry *entry,
834 struct ftrace_graph_ret *graph_ret, void *func,
835 u32 opt_flags, u32 trace_flags, int args_size)
836 {
837 unsigned long err_code = 0;
838 unsigned long retval = 0;
839 bool print_retaddr = false;
840 bool print_retval = false;
841 bool hex_format = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL_HEX);
842
843 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
844 retval = graph_ret->retval;
845 print_retval = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL);
846 #endif
847
848 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
849 print_retaddr = !!(opt_flags & TRACE_GRAPH_PRINT_RETADDR);
850 #endif
851
852 if (print_retval && retval && !hex_format) {
853 /* Check if the return value matches the negative format */
854 if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
855 (((u64)retval) >> 32) == 0) {
856 err_code = sign_extend64(retval, 31);
857 } else {
858 err_code = retval;
859 }
860
861 if (!IS_ERR_VALUE(err_code))
862 err_code = 0;
863 }
864
865 if (entry) {
866 if (entry->ent.type != TRACE_GRAPH_RETADDR_ENT)
867 print_retaddr = false;
868
869 trace_seq_printf(s, "%ps", func);
870
871 if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
872 print_function_args(s, entry->args, (unsigned long)func);
873 trace_seq_putc(s, ';');
874 } else
875 trace_seq_puts(s, "();");
876
877 if (print_retval || print_retaddr)
878 trace_seq_puts(s, " /*");
879 } else {
880 print_retaddr = false;
881 trace_seq_printf(s, "} /* %ps", func);
882 }
883
884 if (print_retaddr)
885 print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
886 trace_flags, false);
887
888 if (print_retval) {
889 if (hex_format || (err_code == 0))
890 trace_seq_printf(s, " ret=0x%lx", retval);
891 else
892 trace_seq_printf(s, " ret=%ld", err_code);
893 }
894
895 if (!entry || print_retval || print_retaddr)
896 trace_seq_puts(s, " */");
897 }
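// Example (illustrative, symbol names and values are made up) of lines
// produced when the funcgraph-retval and funcgraph-retaddr options are
// enabled:
//
//   1)  1.234 us  |  kmem_cache_free(); /* <-tcp_close+0x1a4/0x2d0 ret=0 */
//   1)  5.678 us  |  } /* tcp_sendmsg ret=-11 */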
898
899 #else
900
901 #define print_graph_retval(_seq, _ent, _ret, _func, _opt_flags, _trace_flags, args_size) \
902 do {} while (0)
903
904 #endif
905
906 /* Case of a leaf function on its call entry */
907 static enum print_line_t
908 print_graph_entry_leaf(struct trace_iterator *iter,
909 struct ftrace_graph_ent_entry *entry,
910 struct ftrace_graph_ret_entry *ret_entry,
911 struct trace_seq *s, u32 flags)
912 {
913 struct fgraph_data *data = iter->private;
914 struct trace_array *tr = iter->tr;
915 struct ftrace_graph_ret *graph_ret;
916 struct ftrace_graph_ent *call;
917 unsigned long long duration;
918 unsigned long ret_func;
919 int args_size;
920 int cpu = iter->cpu;
921 int i;
922
923 args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);
924
925 graph_ret = &ret_entry->ret;
926 call = &entry->graph_ent;
927 duration = ret_entry->rettime - ret_entry->calltime;
928
929 if (data) {
930 struct fgraph_cpu_data *cpu_data;
931
932 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
933
934 /*
935 * Comments display at + 1 to depth. Since
936 * this is a leaf function, keep the comments
937 * equal to this depth.
938 */
939 cpu_data->depth = call->depth - 1;
940
941 /* No need to keep this function around for this depth */
942 if (call->depth < FTRACE_RETFUNC_DEPTH &&
943 !WARN_ON_ONCE(call->depth < 0))
944 cpu_data->enter_funcs[call->depth] = 0;
945 }
946
947 /* Overhead and duration */
948 print_graph_duration(tr, duration, s, flags);
949
950 /* Function */
951 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
952 trace_seq_putc(s, ' ');
953
954 ret_func = graph_ret->func + iter->tr->text_delta;
955
956 /*
957 * Write out the function return value or return address
958 */
959 if (flags & (__TRACE_GRAPH_PRINT_RETVAL | __TRACE_GRAPH_PRINT_RETADDR)) {
960 print_graph_retval(s, entry, graph_ret,
961 (void *)graph_ret->func + iter->tr->text_delta,
962 flags, tr->trace_flags, args_size);
963 } else {
964 trace_seq_printf(s, "%ps", (void *)ret_func);
965
966 if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
967 print_function_args(s, entry->args, ret_func);
968 trace_seq_putc(s, ';');
969 } else
970 trace_seq_puts(s, "();");
971 }
972 trace_seq_putc(s, '\n');
973
974 print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
975 cpu, iter->ent->pid, flags);
976
977 return trace_handle_return(s);
978 }
979
980 static enum print_line_t
981 print_graph_entry_nested(struct trace_iterator *iter,
982 struct ftrace_graph_ent_entry *entry,
983 struct trace_seq *s, int cpu, u32 flags)
984 {
985 struct ftrace_graph_ent *call = &entry->graph_ent;
986 struct fgraph_data *data = iter->private;
987 struct trace_array *tr = iter->tr;
988 unsigned long func;
989 int args_size;
990 int i;
991
992 if (data) {
993 struct fgraph_cpu_data *cpu_data;
994 int cpu = iter->cpu;
995
996 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
997 cpu_data->depth = call->depth;
998
999 /* Save this function pointer to see if the exit matches */
1000 if (call->depth < FTRACE_RETFUNC_DEPTH &&
1001 !WARN_ON_ONCE(call->depth < 0))
1002 cpu_data->enter_funcs[call->depth] = call->func;
1003 }
1004
1005 /* No time */
1006 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1007
1008 /* Function */
1009 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
1010 trace_seq_putc(s, ' ');
1011
1012 func = call->func + iter->tr->text_delta;
1013
1014 trace_seq_printf(s, "%ps", (void *)func);
1015
1016 args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);
1017
1018 if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long))
1019 print_function_args(s, entry->args, func);
1020 else
1021 trace_seq_puts(s, "()");
1022
1023 trace_seq_puts(s, " {");
1024
1025 if (flags & __TRACE_GRAPH_PRINT_RETADDR &&
1026 entry->ent.type == TRACE_GRAPH_RETADDR_ENT)
1027 print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
1028 tr->trace_flags, true);
1029 trace_seq_putc(s, '\n');
1030
1031 if (trace_seq_has_overflowed(s))
1032 return TRACE_TYPE_PARTIAL_LINE;
1033
1034 /*
1035 * we already consumed the current entry to check the next one
1036 * and see if this is a leaf.
1037 */
1038 return TRACE_TYPE_NO_CONSUME;
1039 }
1040
1041 static void
1042 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
1043 int type, unsigned long addr, u32 flags)
1044 {
1045 struct fgraph_data *data = iter->private;
1046 struct trace_entry *ent = iter->ent;
1047 struct trace_array *tr = iter->tr;
1048 int cpu = iter->cpu;
1049
1050 /* Pid */
1051 verif_pid(s, ent->pid, cpu, data);
1052
1053 if (type)
1054 /* Interrupt */
1055 print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
1056
1057 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1058 return;
1059
1060 /* Absolute time */
1061 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1062 print_graph_abs_time(iter->ts, s);
1063
1064 /* Relative time */
1065 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1066 print_graph_rel_time(iter, s);
1067
1068 /* Cpu */
1069 if (flags & TRACE_GRAPH_PRINT_CPU)
1070 print_graph_cpu(s, cpu);
1071
1072 /* Proc */
1073 if (flags & TRACE_GRAPH_PRINT_PROC) {
1074 print_graph_proc(s, ent->pid);
1075 trace_seq_puts(s, " | ");
1076 }
1077
1078 /* Latency format */
1079 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
1080 print_graph_lat_fmt(s, ent);
1081
1082 return;
1083 }
1084
1085 /*
1086 * Entry check for irq code
1087 *
1088 * returns 1 if
1089 * - we are inside irq code
1090 * - we just entered irq code
1091 *
1092 * returns 0 if
1093 * - funcgraph-irqs option is set
1094 * - we are not inside irq code
1095 */
1096 static int
1097 check_irq_entry(struct trace_iterator *iter, u32 flags,
1098 unsigned long addr, int depth)
1099 {
1100 int cpu = iter->cpu;
1101 int *depth_irq;
1102 struct fgraph_data *data = iter->private;
1103
1104 addr += iter->tr->text_delta;
1105
1106 /*
1107 * If we are either displaying irqs, or we got called as
1108 * a graph event and private data does not exist,
1109 * then we bypass the irq check.
1110 */
1111 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1112 (!data))
1113 return 0;
1114
1115 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1116
1117 /*
1118 * We are inside the irq code
1119 */
1120 if (*depth_irq >= 0)
1121 return 1;
1122
1123 if ((addr < (unsigned long)__irqentry_text_start) ||
1124 (addr >= (unsigned long)__irqentry_text_end))
1125 return 0;
1126
1127 /*
1128 * We are entering irq code.
1129 */
1130 *depth_irq = depth;
1131 return 1;
1132 }
1133
1134 /*
1135 * Return check for irq code
1136 *
1137 * returns 1 if
1138 * - we are inside irq code
1139 * - we just left irq code
1140 *
1141 * returns 0 if
1142 * - funcgraph-irqs option is set
1143 * - we are not inside irq code
1144 */
1145 static int
1146 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
1147 {
1148 int cpu = iter->cpu;
1149 int *depth_irq;
1150 struct fgraph_data *data = iter->private;
1151
1152 /*
1153 * If we are either displaying irqs, or we got called as
1154 * a graph event and private data does not exist,
1155 * then we bypass the irq check.
1156 */
1157 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1158 (!data))
1159 return 0;
1160
1161 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1162
1163 /*
1164 * We are not inside the irq code.
1165 */
1166 if (*depth_irq == -1)
1167 return 0;
1168
1169 /*
1170 * We are inside the irq code, and this is the returning entry.
1171 * Don't trace it and clear the entry depth, since
1172 * we are now out of the irq code.
1173 *
1174 * This condition ensures that we 'leave the irq code' once
1175 * we are out of the entry depth, thus protecting us from
1176 * losing the RETURN entry.
1177 */
1178 if (*depth_irq >= depth) {
1179 *depth_irq = -1;
1180 return 1;
1181 }
1182
1183 /*
1184 * We are inside the irq code, and this is not the entry.
1185 */
1186 return 1;
1187 }
1188
1189 static enum print_line_t
1190 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1191 struct trace_iterator *iter, u32 flags)
1192 {
1193 struct fgraph_data *data = iter->private;
1194 struct ftrace_graph_ent *call;
1195 struct ftrace_graph_ret_entry *leaf_ret;
1196 static enum print_line_t ret;
1197 int cpu = iter->cpu;
1198 /*
1199 * print_graph_entry() may consume the current event,
1200 * thus @field may become invalid, so we need to save it.
1201 * sizeof(struct ftrace_graph_ent_entry) is very small,
1202 * so it can safely be saved on the stack.
1203 */
1204 struct ftrace_graph_ent_entry *entry;
1205 u8 save_buf[sizeof(*entry) + FTRACE_REGS_MAX_ARGS * sizeof(long)];
1206
1207 /* The ent_size is expected to be as big as the entry */
1208 if (iter->ent_size > sizeof(save_buf))
1209 iter->ent_size = sizeof(save_buf);
1210
1211 entry = (void *)save_buf;
1212 memcpy(entry, field, iter->ent_size);
1213
1214 call = &entry->graph_ent;
1215
1216 if (check_irq_entry(iter, flags, call->func, call->depth))
1217 return TRACE_TYPE_HANDLED;
1218
1219 print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
1220
1221 leaf_ret = get_return_for_leaf(iter, entry);
1222 if (leaf_ret)
1223 ret = print_graph_entry_leaf(iter, entry, leaf_ret, s, flags);
1224 else
1225 ret = print_graph_entry_nested(iter, entry, s, cpu, flags);
1226
1227 if (data) {
1228 /*
1229 * If we failed to write our output, then we need to make
1230 * note of it, because we have already consumed our entry.
1231 */
1232 if (s->full) {
1233 data->failed = 1;
1234 data->cpu = cpu;
1235 } else
1236 data->failed = 0;
1237 }
1238
1239 return ret;
1240 }
1241
1242 static enum print_line_t
1243 print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
1244 struct trace_entry *ent, struct trace_iterator *iter,
1245 u32 flags)
1246 {
1247 struct ftrace_graph_ret *trace = &retentry->ret;
1248 u64 calltime = retentry->calltime;
1249 u64 rettime = retentry->rettime;
1250 unsigned long long duration = rettime - calltime;
1251 struct fgraph_data *data = iter->private;
1252 struct trace_array *tr = iter->tr;
1253 unsigned long func;
1254 pid_t pid = ent->pid;
1255 int cpu = iter->cpu;
1256 int func_match = 1;
1257 int i;
1258
1259 func = trace->func + iter->tr->text_delta;
1260
1261 if (check_irq_return(iter, flags, trace->depth))
1262 return TRACE_TYPE_HANDLED;
1263
1264 if (data) {
1265 struct fgraph_cpu_data *cpu_data;
1266 int cpu = iter->cpu;
1267
1268 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1269
1270 /*
1271 * Comments display at + 1 to depth. Since this is the
1272 * return from a function, we now want the comments
1273 * to display at the same level as the bracket.
1274 */
1275 cpu_data->depth = trace->depth - 1;
1276
1277 if (trace->depth < FTRACE_RETFUNC_DEPTH &&
1278 !WARN_ON_ONCE(trace->depth < 0)) {
1279 if (cpu_data->enter_funcs[trace->depth] != trace->func)
1280 func_match = 0;
1281 cpu_data->enter_funcs[trace->depth] = 0;
1282 }
1283 }
1284
1285 print_graph_prologue(iter, s, 0, 0, flags);
1286
1287 /* Overhead and duration */
1288 print_graph_duration(tr, duration, s, flags);
1289
1290 /* Closing brace */
1291 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1292 trace_seq_putc(s, ' ');
1293
1294 /*
1295 * Always write out the function name and its return value if the
1296 * funcgraph-retval option is enabled.
1297 */
1298 if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
1299 print_graph_retval(s, NULL, trace, (void *)func, flags,
1300 tr->trace_flags, 0);
1301 } else {
1302 /*
1303 * If the return function does not have a matching entry,
1304 * then the entry was lost. Instead of just printing
1305 * the '}' and letting the user guess what function this
1306 * belongs to, write out the function name. Always do
1307 * that if the funcgraph-tail option is enabled.
1308 */
1309 if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1310 trace_seq_puts(s, "}");
1311 else
1312 trace_seq_printf(s, "} /* %ps */", (void *)func);
1313 }
1314 trace_seq_putc(s, '\n');
1315
1316 /* Overrun */
1317 if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1318 trace_seq_printf(s, " (Overruns: %u)\n",
1319 trace->overrun);
1320
1321 print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1322 cpu, pid, flags);
1323
1324 return trace_handle_return(s);
1325 }
1326
1327 static enum print_line_t
1328 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1329 struct trace_iterator *iter, u32 flags)
1330 {
1331 struct trace_array *tr = iter->tr;
1332 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1333 struct fgraph_data *data = iter->private;
1334 struct trace_event *event;
1335 int depth = 0;
1336 int ret;
1337 int i;
1338
1339 if (data)
1340 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1341
1342 print_graph_prologue(iter, s, 0, 0, flags);
1343
1344 /* No time */
1345 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1346
1347 /* Indentation */
1348 if (depth > 0)
1349 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1350 trace_seq_putc(s, ' ');
1351
1352 /* The comment */
1353 trace_seq_puts(s, "/* ");
1354
1355 switch (iter->ent->type) {
1356 case TRACE_BPUTS:
1357 ret = trace_print_bputs_msg_only(iter);
1358 if (ret != TRACE_TYPE_HANDLED)
1359 return ret;
1360 break;
1361 case TRACE_BPRINT:
1362 ret = trace_print_bprintk_msg_only(iter);
1363 if (ret != TRACE_TYPE_HANDLED)
1364 return ret;
1365 break;
1366 case TRACE_PRINT:
1367 ret = trace_print_printk_msg_only(iter);
1368 if (ret != TRACE_TYPE_HANDLED)
1369 return ret;
1370 break;
1371 default:
1372 event = ftrace_find_event(ent->type);
1373 if (!event)
1374 return TRACE_TYPE_UNHANDLED;
1375
1376 ret = event->funcs->trace(iter, sym_flags, event);
1377 if (ret != TRACE_TYPE_HANDLED)
1378 return ret;
1379 }
1380
1381 if (trace_seq_has_overflowed(s))
1382 goto out;
1383
1384 /* Strip ending newline */
1385 if (s->buffer[s->seq.len - 1] == '\n') {
1386 s->buffer[s->seq.len - 1] = '\0';
1387 s->seq.len--;
1388 }
1389
1390 trace_seq_puts(s, " */\n");
1391 out:
1392 return trace_handle_return(s);
1393 }
1394
1395
1396 enum print_line_t
1397 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1398 {
1399 struct ftrace_graph_ent_entry *field;
1400 struct fgraph_data *data = iter->private;
1401 struct trace_entry *entry = iter->ent;
1402 struct trace_seq *s = &iter->seq;
1403 int cpu = iter->cpu;
1404 int ret;
1405
1406 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1407 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1408 return TRACE_TYPE_HANDLED;
1409 }
1410
1411 /*
1412 * If the last output failed, there's a possibility we need
1413 * to print out the missing entry, which would otherwise never go out.
1414 */
1415 if (data && data->failed) {
1416 field = &data->ent.ent;
1417 iter->cpu = data->cpu;
1418 ret = print_graph_entry(field, s, iter, flags);
1419 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1420 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1421 ret = TRACE_TYPE_NO_CONSUME;
1422 }
1423 iter->cpu = cpu;
1424 return ret;
1425 }
1426
1427 switch (entry->type) {
1428 case TRACE_GRAPH_ENT: {
1429 trace_assign_type(field, entry);
1430 return print_graph_entry(field, s, iter, flags);
1431 }
1432 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
1433 case TRACE_GRAPH_RETADDR_ENT: {
1434 struct fgraph_retaddr_ent_entry saved;
1435 struct fgraph_retaddr_ent_entry *rfield;
1436
1437 trace_assign_type(rfield, entry);
1438 saved = *rfield;
1439 return print_graph_entry((struct ftrace_graph_ent_entry *)&saved, s, iter, flags);
1440 }
1441 #endif
1442 case TRACE_GRAPH_RET: {
1443 struct ftrace_graph_ret_entry *field;
1444 trace_assign_type(field, entry);
1445 return print_graph_return(field, s, entry, iter, flags);
1446 }
1447 case TRACE_STACK:
1448 case TRACE_FN:
1449 /* don't trace stack and functions as comments */
1450 return TRACE_TYPE_UNHANDLED;
1451
1452 default:
1453 return print_graph_comment(s, entry, iter, flags);
1454 }
1455
1456 return TRACE_TYPE_HANDLED;
1457 }
1458
1459 static enum print_line_t
1460 print_graph_function(struct trace_iterator *iter)
1461 {
1462 return print_graph_function_flags(iter, tracer_flags.val);
1463 }
1464
1465 static enum print_line_t
1466 print_graph_function_event(struct trace_iterator *iter, int flags,
1467 struct trace_event *event)
1468 {
1469 return print_graph_function(iter);
1470 }
1471
1472 static void print_lat_header(struct seq_file *s, u32 flags)
1473 {
1474 static const char spaces[] = "                " /* 16 spaces */
1475 "    " /* 4 spaces */
1476 "                 "; /* 17 spaces */
1477 int size = 0;
1478
1479 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1480 size += 16;
1481 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1482 size += 16;
1483 if (flags & TRACE_GRAPH_PRINT_CPU)
1484 size += 4;
1485 if (flags & TRACE_GRAPH_PRINT_PROC)
1486 size += 17;
1487
1488 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
1489 seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
1490 seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1491 seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
1492 seq_printf(s, "#%.*s||| / \n", size, spaces);
1493 }
1494
1495 static void __print_graph_headers_flags(struct trace_array *tr,
1496 struct seq_file *s, u32 flags)
1497 {
1498 int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1499
1500 if (lat)
1501 print_lat_header(s, flags);
1502
1503 /* 1st line */
1504 seq_putc(s, '#');
1505 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1506 seq_puts(s, " TIME ");
1507 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1508 seq_puts(s, " REL TIME ");
1509 if (flags & TRACE_GRAPH_PRINT_CPU)
1510 seq_puts(s, " CPU");
1511 if (flags & TRACE_GRAPH_PRINT_PROC)
1512 seq_puts(s, " TASK/PID ");
1513 if (lat)
1514 seq_puts(s, "|||| ");
1515 if (flags & TRACE_GRAPH_PRINT_DURATION)
1516 seq_puts(s, " DURATION ");
1517 seq_puts(s, " FUNCTION CALLS\n");
1518
1519 /* 2nd line */
1520 seq_putc(s, '#');
1521 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1522 seq_puts(s, " | ");
1523 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1524 seq_puts(s, " | ");
1525 if (flags & TRACE_GRAPH_PRINT_CPU)
1526 seq_puts(s, " | ");
1527 if (flags & TRACE_GRAPH_PRINT_PROC)
1528 seq_puts(s, " | | ");
1529 if (lat)
1530 seq_puts(s, "|||| ");
1531 if (flags & TRACE_GRAPH_PRINT_DURATION)
1532 seq_puts(s, " | | ");
1533 seq_puts(s, " | | | |\n");
1534 }
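/*
 * Example (illustrative) of the header these two passes produce with the
 * default CPU and DURATION columns enabled:
 *
 *   # CPU  DURATION                  FUNCTION CALLS
 *   # |     |   |                     |   |   |   |
 */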
1535
1536 static void print_graph_headers(struct seq_file *s)
1537 {
1538 print_graph_headers_flags(s, tracer_flags.val);
1539 }
1540
1541 void print_graph_headers_flags(struct seq_file *s, u32 flags)
1542 {
1543 struct trace_iterator *iter = s->private;
1544 struct trace_array *tr = iter->tr;
1545
1546 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1547 return;
1548
1549 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1550 /* print nothing if the buffers are empty */
1551 if (trace_empty(iter))
1552 return;
1553
1554 print_trace_header(s, iter);
1555 }
1556
1557 __print_graph_headers_flags(tr, s, flags);
1558 }
1559
1560 void graph_trace_open(struct trace_iterator *iter)
1561 {
1562 /* pid and depth on the last trace processed */
1563 struct fgraph_data *data;
1564 gfp_t gfpflags;
1565 int cpu;
1566
1567 iter->private = NULL;
1568
1569 /* We can be called in atomic context via ftrace_dump() */
1570 gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1571
1572 data = kzalloc(sizeof(*data), gfpflags);
1573 if (!data)
1574 goto out_err;
1575
1576 data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1577 if (!data->cpu_data)
1578 goto out_err_free;
1579
1580 for_each_possible_cpu(cpu) {
1581 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1582 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1583 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1584 int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1585
1586 *pid = -1;
1587 *depth = 0;
1588 *ignore = 0;
1589 *depth_irq = -1;
1590 }
1591
1592 iter->private = data;
1593
1594 return;
1595
1596 out_err_free:
1597 kfree(data);
1598 out_err:
1599 pr_warn("function graph tracer: not enough memory\n");
1600 }
1601
1602 void graph_trace_close(struct trace_iterator *iter)
1603 {
1604 struct fgraph_data *data = iter->private;
1605
1606 if (data) {
1607 free_percpu(data->cpu_data);
1608 kfree(data);
1609 iter->private = NULL;
1610 }
1611 }
1612
1613 static int
1614 func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1615 {
1616 if (bit == TRACE_GRAPH_PRINT_IRQS)
1617 ftrace_graph_skip_irqs = !set;
1618
1619 if (bit == TRACE_GRAPH_SLEEP_TIME)
1620 ftrace_graph_sleep_time_control(set);
1621
1622 if (bit == TRACE_GRAPH_GRAPH_TIME)
1623 ftrace_graph_graph_time_control(set);
1624
1625 if (bit == TRACE_GRAPH_ARGS)
1626 return ftrace_graph_trace_args(tr, set);
1627
1628 return 0;
1629 }
1630
1631 static struct trace_event_functions graph_functions = {
1632 .trace = print_graph_function_event,
1633 };
1634
1635 static struct trace_event graph_trace_entry_event = {
1636 .type = TRACE_GRAPH_ENT,
1637 .funcs = &graph_functions,
1638 };
1639
1640 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
1641 static struct trace_event graph_trace_retaddr_entry_event = {
1642 .type = TRACE_GRAPH_RETADDR_ENT,
1643 .funcs = &graph_functions,
1644 };
1645 #endif
1646
1647 static struct trace_event graph_trace_ret_event = {
1648 .type = TRACE_GRAPH_RET,
1649 .funcs = &graph_functions
1650 };
1651
1652 static struct tracer graph_trace __tracer_data = {
1653 .name = "function_graph",
1654 .update_thresh = graph_trace_update_thresh,
1655 .open = graph_trace_open,
1656 .pipe_open = graph_trace_open,
1657 .close = graph_trace_close,
1658 .pipe_close = graph_trace_close,
1659 .init = graph_trace_init,
1660 .reset = graph_trace_reset,
1661 .print_line = print_graph_function,
1662 .print_header = print_graph_headers,
1663 .flags = &tracer_flags,
1664 .set_flag = func_graph_set_flag,
1665 .allow_instances = true,
1666 #ifdef CONFIG_FTRACE_SELFTEST
1667 .selftest = trace_selftest_startup_function_graph,
1668 #endif
1669 };
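// Illustrative usage (not from the original source): the tracer registered
// above is selected through tracefs, e.g.
//
//   # cd /sys/kernel/tracing
//   # echo function_graph > current_tracer
//   # head trace
//
// which emits indented entry/exit pairs such as (values made up):
//
//   0)   0.540 us  |        rcu_read_lock();
//   0)   2.317 us  |      } /* __fget_light */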
1670
1671
1672 static ssize_t
1673 graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1674 loff_t *ppos)
1675 {
1676 unsigned long val;
1677 int ret;
1678
1679 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1680 if (ret)
1681 return ret;
1682
1683 fgraph_max_depth = val;
1684
1685 *ppos += cnt;
1686
1687 return cnt;
1688 }
1689
1690 static ssize_t
1691 graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1692 loff_t *ppos)
1693 {
1694 char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
1695 int n;
1696
1697 n = sprintf(buf, "%d\n", fgraph_max_depth);
1698
1699 return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1700 }
1701
1702 static const struct file_operations graph_depth_fops = {
1703 .open = tracing_open_generic,
1704 .write = graph_depth_write,
1705 .read = graph_depth_read,
1706 .llseek = generic_file_llseek,
1707 };
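/*
 * Illustrative usage (assumption: tracefs mounted at /sys/kernel/tracing):
 * the max_graph_depth file created below limits how deep the call graph is
 * recorded; writing 0 removes the limit.
 *
 *   # echo 2 > /sys/kernel/tracing/max_graph_depth
 *   # echo 0 > /sys/kernel/tracing/max_graph_depth
 */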
1708
1709 static __init int init_graph_tracefs(void)
1710 {
1711 int ret;
1712
1713 ret = tracing_init_dentry();
1714 if (ret)
1715 return 0;
1716
1717 trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
1718 NULL, &graph_depth_fops);
1719
1720 return 0;
1721 }
1722 fs_initcall(init_graph_tracefs);
1723
1724 static __init int init_graph_trace(void)
1725 {
1726 max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1727
1728 if (!register_trace_event(&graph_trace_entry_event)) {
1729 pr_warn("Warning: could not register graph trace events\n");
1730 return 1;
1731 }
1732
1733 #ifdef CONFIG_FUNCTION_GRAPH_RETADDR
1734 if (!register_trace_event(&graph_trace_retaddr_entry_event)) {
1735 pr_warn("Warning: could not register graph trace retaddr events\n");
1736 return 1;
1737 }
1738 #endif
1739
1740 if (!register_trace_event(&graph_trace_ret_event)) {
1741 pr_warn("Warning: could not register graph trace events\n");
1742 return 1;
1743 }
1744
1745 return register_tracer(&graph_trace);
1746 }
1747
1748 core_initcall(init_graph_trace);
1749