// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
        /* Display function return value ? */
        { TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
        /* Display function return value in hexadecimal format ? */
        { TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
#endif
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

/*
 * The DURATION column is also used to display IRQ signs.  The following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

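/*
 * Reserve and commit a function-entry event in the ring buffer.
 * Returns 0 if the ring buffer had no room to reserve the event,
 * 1 otherwise (even when an event filter discards it).
 */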
int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned int trace_ctx)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), trace_ctx);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_hardirq();
}

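/*
 * Entry callback registered with the fgraph infrastructure.  Decides
 * whether this call should be recorded at all; returning 0 also tells
 * fgraph not to bother hooking the function's return.
 */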
int trace_graph_entry(struct ftrace_graph_ent *trace,
                      struct fgraph_ops *gops)
{
        unsigned long *task_var = fgraph_get_task_var(gops);
        struct trace_array *tr = gops->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;
        long disabled;
        int ret;
        int cpu;

        if (*task_var & TRACE_GRAPH_NOTRACE)
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func)) {
                *task_var |= TRACE_GRAPH_NOTRACE_BIT;
                /*
                 * Need to return 1 to have the return called
                 * that will clear the NOTRACE bit.
                 */
                return 1;
        }

        if (!ftrace_trace_task(tr))
                return 0;

        if (ftrace_graph_ignore_func(gops, trace))
                return 0;

        if (ftrace_graph_ignore_irqs())
                return 0;

        /*
         * Stop here if tracing_threshold is set. We only write function return
         * events to the ring buffer.
         */
        if (tracing_thresh)
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                trace_ctx = tracing_gen_ctx_flags(flags);
                ret = __trace_graph_entry(tr, trace, trace_ctx);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

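/*
 * Emit a matching entry/return pair with zero duration for @ip, so
 * that callers outside this tracer (e.g. the latency tracers) can
 * output single function events in the graph format.
 */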
static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned int trace_ctx)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, trace_ctx);
        __trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned int trace_ctx)
{
        __trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned int trace_ctx)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), trace_ctx);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace,
                        struct fgraph_ops *gops)
{
        unsigned long *task_var = fgraph_get_task_var(gops);
        struct trace_array *tr = gops->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;
        long disabled;
        int cpu;

        ftrace_graph_addr_finish(gops, trace);

        if (*task_var & TRACE_GRAPH_NOTRACE) {
                *task_var &= ~TRACE_GRAPH_NOTRACE;
                return;
        }

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                trace_ctx = tracing_gen_ctx_flags(flags);
                __trace_graph_return(tr, trace, trace_ctx);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

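/*
 * Return callback used when tracing_thresh is set: the entry side wrote
 * nothing, so only record functions whose duration met the threshold.
 */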
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
                                      struct fgraph_ops *gops)
{
        ftrace_graph_addr_finish(gops, trace);

        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
                trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
                return;
        }

        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace, gops);
}

static struct fgraph_ops funcgraph_ops = {
        .entryfunc = &trace_graph_entry,
        .retfunc = &trace_graph_return,
};

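/*
 * Allocate a private fgraph_ops for @tr, so each trace instance gets
 * its own callbacks and private pointer back to the instance.
 */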
int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
        struct fgraph_ops *gops;

        gops = kzalloc(sizeof(*gops), GFP_KERNEL);
        if (!gops)
                return -ENOMEM;

        gops->entryfunc = &trace_graph_entry;
        gops->retfunc = &trace_graph_return;

        tr->gops = gops;
        gops->private = tr;

        fgraph_init_ops(&gops->ops, ops);

        return 0;
}

void free_fgraph_ops(struct trace_array *tr)
{
        kfree(tr->gops);
}

__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
        tr->gops = &funcgraph_ops;
        funcgraph_ops.private = tr;
        fgraph_init_ops(&tr->gops->ops, ops);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        tr->gops->entryfunc = trace_graph_entry;

        if (tracing_thresh)
                tr->gops->retfunc = trace_graph_thresh_return;
        else
                tr->gops->retfunc = trace_graph_return;

        /* Make sure the gops functions are visible before we start tracing */
        smp_mb();

        ret = register_ftrace_graph(tr->gops);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph(tr->gops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
        trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
        /*
         * Context-switch trace line:

         ------------------------------------------
         | 1)  migration/0--1  =>  sshd-1755
         ------------------------------------------

         */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

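/*
 * Peek at the event following @curr: if it is the matching
 * TRACE_GRAPH_RET (same pid and function), the entry is a leaf and the
 * return event is consumed too, letting both print as one line, e.g.
 *   1)   0.535 us    |        rcu_read_lock();
 * Returns NULL when the entry is not a leaf.
 */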
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_iter_advance(ring_iter);

        return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
        unsigned long long usecs;

        usecs = iter->ts - iter->array_buffer->time_start;
        do_div(usecs, NSEC_PER_USEC);

        trace_seq_printf(s, "%9llu us |  ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Relative time */
                if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                        print_graph_rel_time(iter, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

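/*
 * Format @duration (nanoseconds) as a "123.456 us" style field, padded
 * with spaces to the fixed width of the DURATION column.
 */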
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of time execution to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL

#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL

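/*
 * Print the function's return value.  Values that look like negative
 * errnos (including 32-bit values sign-extended on 64-bit kernels) are
 * printed as signed decimals; anything else, or any value when
 * funcgraph-retval-hex is set, is printed in hexadecimal.
 */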
static void print_graph_retval(struct trace_seq *s, unsigned long retval,
                               bool leaf, void *func, bool hex_format)
{
        unsigned long err_code = 0;

        if (retval == 0 || hex_format)
                goto done;

        /* Check if the return value matches the negative format */
        if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
            (((u64)retval) >> 32) == 0) {
                /* sign extension */
                err_code = (unsigned long)(s32)retval;
        } else {
                err_code = retval;
        }

        if (!IS_ERR_VALUE(err_code))
                err_code = 0;

done:
        if (leaf) {
                if (hex_format || (err_code == 0))
                        trace_seq_printf(s, "%ps(); /* = 0x%lx */\n",
                                         func, retval);
                else
                        trace_seq_printf(s, "%ps(); /* = %ld */\n",
                                         func, err_code);
        } else {
                if (hex_format || (err_code == 0))
                        trace_seq_printf(s, "} /* %ps = 0x%lx */\n",
                                         func, retval);
                else
                        trace_seq_printf(s, "} /* %ps = %ld */\n",
                                         func, err_code);
        }
}

#else

#define __TRACE_GRAPH_PRINT_RETVAL 0

#define print_graph_retval(_seq, _retval, _leaf, _func, _format) do {} while (0)

#endif

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int cpu = iter->cpu;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * Write out the function return value if the funcgraph-retval
         * option is enabled.
         */
        if (flags & __TRACE_GRAPH_PRINT_RETVAL)
                print_graph_retval(s, graph_ret->retval, true, (void *)call->func,
                                   !!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
        else
                trace_seq_printf(s, "%ps();\n", (void *)call->func);

        print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
                        cpu, iter->ent->pid, flags);

        return trace_handle_return(s);
}

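/* Case of a function with nested calls: print "func() {" and save its depth */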
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

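/*
 * Print everything that precedes the function itself: the pid-change
 * block, irq entry/exit markers, and the context columns (time, cpu,
 * task/pid, latency flags) selected by @flags.
 */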
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Relative time */
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                print_graph_rel_time(iter, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the return of the irq
         * entry function.  Let's not trace it and clear the entry depth,
         * since we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

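/*
 * Print one function entry: peek ahead to choose between the leaf and
 * nested formats, and note a failed write so the saved entry can be
 * replayed by the next call.
 */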
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

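/*
 * Print the closing brace of a nested call with its duration.  If the
 * matching entry event was lost, the function name is appended after
 * the brace so the user does not have to guess.
 */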
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(trace->depth < 0)) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * Always write out the function name and its return value if the
         * funcgraph-retval option is enabled.
         */
        if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
                print_graph_retval(s, trace->retval, false, (void *)trace->func,
                                   !!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
        } else {
                /*
                 * If the return function does not have a matching entry,
                 * then the entry was lost. Instead of just printing
                 * the '}' and letting the user guess what function this
                 * belongs to, write out the function name. Always do
                 * that if the funcgraph-tail option is enabled.
                 */
                if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                        trace_seq_puts(s, "}\n");
                else
                        trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
        }

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %u)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

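/*
 * Print any non-graph event (trace_printk() output or another
 * registered event) as a C-style comment, indented at the current
 * call depth.
 */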
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPUTS:
                ret = trace_print_bputs_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


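/*
 * Top-level output routine: replay a saved entry if the previous write
 * failed, then dispatch on the event type (entry, return, or anything
 * else rendered as a comment).
 */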
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved at the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

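/*
 * Print the latency-format legend, shifted right by the width of the
 * enabled time/cpu/proc columns so the flag markers line up.
 */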
static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "      /* 16 spaces */
                "    "                                       /* 4 spaces */
                "                 ";                         /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                seq_puts(s, "   REL TIME     ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||   ");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||   ");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

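/*
 * Handlers for the "max_graph_depth" tracefs file.  Writing a decimal
 * value caps how deep the tracer follows nested calls (0 means no
 * limit); reading returns the current cap.
 */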
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        fgraph_max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
        int n;

        n = sprintf(buf, "%d\n", fgraph_max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
        int ret;

        ret = tracing_init_dentry();
        if (ret)
                return 0;

        trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);