1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 *
4 * Function graph tracer.
5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
6 * Mostly borrowed from function tracer which
7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
8 *
9 */
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/fs.h>
15
16 #include "trace.h"
17 #include "trace_output.h"
18
19 /* When set, irq functions will be ignored */
20 static int ftrace_graph_skip_irqs;
21
22 struct fgraph_cpu_data {
23 pid_t last_pid;
24 int depth;
25 int depth_irq;
26 int ignore;
27 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
28 };
29
30 struct fgraph_data {
31 struct fgraph_cpu_data __percpu *cpu_data;
32
33 /* Place to preserve last processed entry. */
34 struct ftrace_graph_ent_entry ent;
35 struct ftrace_graph_ret_entry ret;
36 int failed;
37 int cpu;
38 };
39
40 #define TRACE_GRAPH_INDENT 2
41
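/*
 * Maximum call depth the function graph tracer will follow;
 * 0 means no limit. Set through the "max_graph_depth" tracefs file.
 */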
42 unsigned int fgraph_max_depth;
43
44 static struct tracer_opt trace_opts[] = {
45 /* Display overruns? (for self-debug purpose) */
46 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
47 /* Display CPU ? */
48 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
49 /* Display Overhead ? */
50 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
51 /* Display proc name/pid */
52 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
53 /* Display duration of execution */
54 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
55 /* Display absolute time of an entry */
56 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
57 /* Display interrupts */
58 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
59 /* Display function name after trailing } */
60 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
61 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
62 /* Display function return value ? */
63 { TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
64 /* Display function return value in hexadecimal format ? */
65 { TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
66 #endif
67 /* Include sleep time (scheduled out) between entry and return */
68 { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
69
70 #ifdef CONFIG_FUNCTION_PROFILER
71 /* Include time within nested functions */
72 { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
73 #endif
74
75 { } /* Empty entry */
76 };
77
78 static struct tracer_flags tracer_flags = {
79 /* Don't display overruns, proc, or tail by default */
80 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
81 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
82 TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
83 .opts = trace_opts
84 };
85
86 /*
87 * The DURATION column is also used to display IRQ signs; the
88 * following values are used by print_graph_irq() and others
89 * to fill in space in the DURATION column.
90 */
91 enum {
92 FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
93 FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
94 FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
95 };
96
97 static void
98 print_graph_duration(struct trace_array *tr, unsigned long long duration,
99 struct trace_seq *s, u32 flags);
100
101 int __trace_graph_entry(struct trace_array *tr,
102 struct ftrace_graph_ent *trace,
103 unsigned int trace_ctx)
104 {
105 struct trace_event_call *call = &event_funcgraph_entry;
106 struct ring_buffer_event *event;
107 struct trace_buffer *buffer = tr->array_buffer.buffer;
108 struct ftrace_graph_ent_entry *entry;
109
110 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
111 sizeof(*entry), trace_ctx);
112 if (!event)
113 return 0;
114 entry = ring_buffer_event_data(event);
115 entry->graph_ent = *trace;
116 if (!call_filter_check_discard(call, entry, buffer, event))
117 trace_buffer_unlock_commit_nostack(buffer, event);
118
119 return 1;
120 }
121
122 static inline int ftrace_graph_ignore_irqs(void)
123 {
124 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
125 return 0;
126
127 return in_hardirq();
128 }
129
130 int trace_graph_entry(struct ftrace_graph_ent *trace,
131 struct fgraph_ops *gops)
132 {
133 unsigned long *task_var = fgraph_get_task_var(gops);
134 struct trace_array *tr = gops->private;
135 struct trace_array_cpu *data;
136 unsigned long flags;
137 unsigned int trace_ctx;
138 long disabled;
139 int ret;
140 int cpu;
141
142 if (*task_var & TRACE_GRAPH_NOTRACE)
143 return 0;
144
145 /*
146 * Do not trace a function if it's filtered by set_graph_notrace.
147 * Make the index of ret stack negative to indicate that it should
148 * ignore further functions. But it needs its own ret stack entry
149 * to recover the original index in order to continue tracing after
150 * returning from the function.
151 */
152 if (ftrace_graph_notrace_addr(trace->func)) {
153 *task_var |= TRACE_GRAPH_NOTRACE_BIT;
154 /*
155 * Need to return 1 to have the return called
156 * that will clear the NOTRACE bit.
157 */
158 return 1;
159 }
160
161 if (!ftrace_trace_task(tr))
162 return 0;
163
164 if (ftrace_graph_ignore_func(gops, trace))
165 return 0;
166
167 if (ftrace_graph_ignore_irqs())
168 return 0;
169
170 /*
171 * Stop here if tracing_thresh is set. We only write function return
172 * events to the ring buffer.
173 */
174 if (tracing_thresh)
175 return 1;
176
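/*
 * Disable interrupts and bump the per-CPU "disabled" counter so only
 * the outermost event on this CPU is recorded; a nested caller sees
 * disabled > 1 and writes nothing.
 */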
177 local_irq_save(flags);
178 cpu = raw_smp_processor_id();
179 data = per_cpu_ptr(tr->array_buffer.data, cpu);
180 disabled = atomic_inc_return(&data->disabled);
181 if (likely(disabled == 1)) {
182 trace_ctx = tracing_gen_ctx_flags(flags);
183 ret = __trace_graph_entry(tr, trace, trace_ctx);
184 } else {
185 ret = 0;
186 }
187
188 atomic_dec(&data->disabled);
189 local_irq_restore(flags);
190
191 return ret;
192 }
193
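/*
 * Emit a paired entry/return event with identical timestamps so a single
 * address shows up in the graph output as a zero-duration leaf call.
 */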
194 static void
195 __trace_graph_function(struct trace_array *tr,
196 unsigned long ip, unsigned int trace_ctx)
197 {
198 u64 time = trace_clock_local();
199 struct ftrace_graph_ent ent = {
200 .func = ip,
201 .depth = 0,
202 };
203 struct ftrace_graph_ret ret = {
204 .func = ip,
205 .depth = 0,
206 .calltime = time,
207 .rettime = time,
208 };
209
210 __trace_graph_entry(tr, &ent, trace_ctx);
211 __trace_graph_return(tr, &ret, trace_ctx);
212 }
213
214 void
215 trace_graph_function(struct trace_array *tr,
216 unsigned long ip, unsigned long parent_ip,
217 unsigned int trace_ctx)
218 {
219 __trace_graph_function(tr, ip, trace_ctx);
220 }
221
222 void __trace_graph_return(struct trace_array *tr,
223 struct ftrace_graph_ret *trace,
224 unsigned int trace_ctx)
225 {
226 struct trace_event_call *call = &event_funcgraph_exit;
227 struct ring_buffer_event *event;
228 struct trace_buffer *buffer = tr->array_buffer.buffer;
229 struct ftrace_graph_ret_entry *entry;
230
231 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
232 sizeof(*entry), trace_ctx);
233 if (!event)
234 return;
235 entry = ring_buffer_event_data(event);
236 entry->ret = *trace;
237 if (!call_filter_check_discard(call, entry, buffer, event))
238 trace_buffer_unlock_commit_nostack(buffer, event);
239 }
240
241 void trace_graph_return(struct ftrace_graph_ret *trace,
242 struct fgraph_ops *gops)
243 {
244 unsigned long *task_var = fgraph_get_task_var(gops);
245 struct trace_array *tr = gops->private;
246 struct trace_array_cpu *data;
247 unsigned long flags;
248 unsigned int trace_ctx;
249 long disabled;
250 int cpu;
251
252 ftrace_graph_addr_finish(gops, trace);
253
254 if (*task_var & TRACE_GRAPH_NOTRACE) {
255 *task_var &= ~TRACE_GRAPH_NOTRACE;
256 return;
257 }
258
259 local_irq_save(flags);
260 cpu = raw_smp_processor_id();
261 data = per_cpu_ptr(tr->array_buffer.data, cpu);
262 disabled = atomic_inc_return(&data->disabled);
263 if (likely(disabled == 1)) {
264 trace_ctx = tracing_gen_ctx_flags(flags);
265 __trace_graph_return(tr, trace, trace_ctx);
266 }
267 atomic_dec(&data->disabled);
268 local_irq_restore(flags);
269 }
270
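/*
 * Return handler used when tracing_thresh is set: only returns whose
 * duration is at least tracing_thresh are written to the ring buffer
 * (entry events are skipped entirely in trace_graph_entry()).
 */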
271 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
272 struct fgraph_ops *gops)
273 {
274 ftrace_graph_addr_finish(gops, trace);
275
276 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
277 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
278 return;
279 }
280
281 if (tracing_thresh &&
282 (trace->rettime - trace->calltime < tracing_thresh))
283 return;
284 else
285 trace_graph_return(trace, gops);
286 }
287
288 static struct fgraph_ops funcgraph_ops = {
289 .entryfunc = &trace_graph_entry,
290 .retfunc = &trace_graph_return,
291 };
292
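/*
 * Allocate a dedicated fgraph_ops for a trace array. The boot-time
 * top-level array instead reuses the static funcgraph_ops via
 * init_array_fgraph_ops() below.
 */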
293 int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
294 {
295 struct fgraph_ops *gops;
296
297 gops = kzalloc(sizeof(*gops), GFP_KERNEL);
298 if (!gops)
299 return -ENOMEM;
300
301 gops->entryfunc = &trace_graph_entry;
302 gops->retfunc = &trace_graph_return;
303
304 tr->gops = gops;
305 gops->private = tr;
306
307 fgraph_init_ops(&gops->ops, ops);
308
309 return 0;
310 }
311
312 void free_fgraph_ops(struct trace_array *tr)
313 {
314 kfree(tr->gops);
315 }
316
317 __init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
318 {
319 tr->gops = &funcgraph_ops;
320 funcgraph_ops.private = tr;
321 fgraph_init_ops(&tr->gops->ops, ops);
322 }
323
324 static int graph_trace_init(struct trace_array *tr)
325 {
326 int ret;
327
328 tr->gops->entryfunc = trace_graph_entry;
329
330 if (tracing_thresh)
331 tr->gops->retfunc = trace_graph_thresh_return;
332 else
333 tr->gops->retfunc = trace_graph_return;
334
335 /* Make sure gops functions are visible before we start tracing */
336 smp_mb();
337
338 ret = register_ftrace_graph(tr->gops);
339 if (ret)
340 return ret;
341 tracing_start_cmdline_record();
342
343 return 0;
344 }
345
346 static void graph_trace_reset(struct trace_array *tr)
347 {
348 tracing_stop_cmdline_record();
349 unregister_ftrace_graph(tr->gops);
350 }
351
352 static int graph_trace_update_thresh(struct trace_array *tr)
353 {
354 graph_trace_reset(tr);
355 return graph_trace_init(tr);
356 }
357
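/* Width needed to print the largest CPU number; set in init_graph_trace() */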
358 static int max_bytes_for_cpu;
359
360 static void print_graph_cpu(struct trace_seq *s, int cpu)
361 {
362 /*
363 * Start with a space character - to make it stand out
364 * to the right a bit when trace output is pasted into
365 * email:
366 */
367 trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
368 }
369
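/* Width of the centered "comm-pid" field written by print_graph_proc() */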
370 #define TRACE_GRAPH_PROCINFO_LENGTH 14
371
372 static void print_graph_proc(struct trace_seq *s, pid_t pid)
373 {
374 char comm[TASK_COMM_LEN];
375 /* sign + log10(MAX_INT) + '\0' */
376 char pid_str[11];
377 int spaces = 0;
378 int len;
379 int i;
380
381 trace_find_cmdline(pid, comm);
382 comm[7] = '\0'; /* truncate comm so the "comm-pid" field stays within bounds */
383 sprintf(pid_str, "%d", pid);
384
385 /* 1 stands for the "-" character */
386 len = strlen(comm) + strlen(pid_str) + 1;
387
388 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
389 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
390
391 /* First spaces to align center */
392 for (i = 0; i < spaces / 2; i++)
393 trace_seq_putc(s, ' ');
394
395 trace_seq_printf(s, "%s-%s", comm, pid_str);
396
397 /* Last spaces to align center */
398 for (i = 0; i < spaces - (spaces / 2); i++)
399 trace_seq_putc(s, ' ');
400 }
401
402
403 static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
404 {
405 trace_seq_putc(s, ' ');
406 trace_print_lat_fmt(s, entry);
407 trace_seq_puts(s, " | ");
408 }
409
410 /* If the pid changed since the last trace, output this event */
411 static void
412 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
413 {
414 pid_t prev_pid;
415 pid_t *last_pid;
416
417 if (!data)
418 return;
419
420 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
421
422 if (*last_pid == pid)
423 return;
424
425 prev_pid = *last_pid;
426 *last_pid = pid;
427
428 if (prev_pid == -1)
429 return;
430 /*
431 * Context-switch trace line:
432
433 ------------------------------------------
434 | 1) migration/0--1 => sshd-1755
435 ------------------------------------------
436
437 */
438 trace_seq_puts(s, " ------------------------------------------\n");
439 print_graph_cpu(s, cpu);
440 print_graph_proc(s, prev_pid);
441 trace_seq_puts(s, " => ");
442 print_graph_proc(s, pid);
443 trace_seq_puts(s, "\n ------------------------------------------\n\n");
444 }
445
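/*
 * Look ahead at the next event in the ring buffer. If it is the matching
 * return event for @curr (same pid and function), the call is a leaf and
 * the return entry is handed back so both can be printed on one line.
 * Returns NULL when the next event is not the matching return.
 */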
446 static struct ftrace_graph_ret_entry *
447 get_return_for_leaf(struct trace_iterator *iter,
448 struct ftrace_graph_ent_entry *curr)
449 {
450 struct fgraph_data *data = iter->private;
451 struct ring_buffer_iter *ring_iter = NULL;
452 struct ring_buffer_event *event;
453 struct ftrace_graph_ret_entry *next;
454
455 /*
456 * If the previous output failed to write to the seq buffer,
457 * then we just reuse the data from before.
458 */
459 if (data && data->failed) {
460 curr = &data->ent;
461 next = &data->ret;
462 } else {
463
464 ring_iter = trace_buffer_iter(iter, iter->cpu);
465
466 /* First peek to compare current entry and the next one */
467 if (ring_iter)
468 event = ring_buffer_iter_peek(ring_iter, NULL);
469 else {
470 /*
471 * We need to consume the current entry to see
472 * the next one.
473 */
474 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
475 NULL, NULL);
476 event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
477 NULL, NULL);
478 }
479
480 if (!event)
481 return NULL;
482
483 next = ring_buffer_event_data(event);
484
485 if (data) {
486 /*
487 * Save current and next entries for later reference
488 * if the output fails.
489 */
490 data->ent = *curr;
491 /*
492 * If the next event is not a return type, then
493 * we only care about what type it is. Otherwise we can
494 * safely copy the entire event.
495 */
496 if (next->ent.type == TRACE_GRAPH_RET)
497 data->ret = *next;
498 else
499 data->ret.ent.type = next->ent.type;
500 }
501 }
502
503 if (next->ent.type != TRACE_GRAPH_RET)
504 return NULL;
505
506 if (curr->ent.pid != next->ent.pid ||
507 curr->graph_ent.func != next->ret.func)
508 return NULL;
509
510 /* this is a leaf, now advance the iterator */
511 if (ring_iter)
512 ring_buffer_iter_advance(ring_iter);
513
514 return next;
515 }
516
517 static void print_graph_abs_time(u64 t, struct trace_seq *s)
518 {
519 unsigned long usecs_rem;
520
521 usecs_rem = do_div(t, NSEC_PER_SEC);
522 usecs_rem /= 1000;
523
524 trace_seq_printf(s, "%5lu.%06lu | ",
525 (unsigned long)t, usecs_rem);
526 }
527
528 static void
529 print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
530 {
531 unsigned long long usecs;
532
533 usecs = iter->ts - iter->array_buffer->time_start;
534 do_div(usecs, NSEC_PER_USEC);
535
536 trace_seq_printf(s, "%9llu us | ", usecs);
537 }
538
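/*
 * Functions whose address lies in the __irqentry_text section mark entry
 * to (or exit from) an interrupt handler; print the "==========>" or
 * "<==========" marker in place of a duration for those events.
 */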
539 static void
540 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
541 enum trace_type type, int cpu, pid_t pid, u32 flags)
542 {
543 struct trace_array *tr = iter->tr;
544 struct trace_seq *s = &iter->seq;
545 struct trace_entry *ent = iter->ent;
546
547 addr += iter->tr->text_delta;
548
549 if (addr < (unsigned long)__irqentry_text_start ||
550 addr >= (unsigned long)__irqentry_text_end)
551 return;
552
553 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
554 /* Absolute time */
555 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
556 print_graph_abs_time(iter->ts, s);
557
558 /* Relative time */
559 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
560 print_graph_rel_time(iter, s);
561
562 /* Cpu */
563 if (flags & TRACE_GRAPH_PRINT_CPU)
564 print_graph_cpu(s, cpu);
565
566 /* Proc */
567 if (flags & TRACE_GRAPH_PRINT_PROC) {
568 print_graph_proc(s, pid);
569 trace_seq_puts(s, " | ");
570 }
571
572 /* Latency format */
573 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
574 print_graph_lat_fmt(s, ent);
575 }
576
577 /* No overhead */
578 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
579
580 if (type == TRACE_GRAPH_ENT)
581 trace_seq_puts(s, "==========>");
582 else
583 trace_seq_puts(s, "<==========");
584
585 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
586 trace_seq_putc(s, '\n');
587 }
588
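/*
 * Print a nanosecond duration as "<usecs>.<nsecs> us", padded so the
 * DURATION column keeps a fixed width.
 */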
589 void
590 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
591 {
592 unsigned long nsecs_rem = do_div(duration, 1000);
593 /* log10(ULONG_MAX) + '\0' */
594 char usecs_str[21];
595 char nsecs_str[5];
596 int len;
597 int i;
598
599 sprintf(usecs_str, "%lu", (unsigned long) duration);
600
601 /* Print usecs */
602 trace_seq_printf(s, "%s", usecs_str);
603
604 len = strlen(usecs_str);
605
606 /* Print nsecs (we don't want to exceed 7 digits) */
607 if (len < 7) {
608 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
609
610 snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
611 trace_seq_printf(s, ".%s", nsecs_str);
612 len += strlen(nsecs_str) + 1;
613 }
614
615 trace_seq_puts(s, " us ");
616
617 /* Print remaining spaces to fit the row's width */
618 for (i = len; i < 8; i++)
619 trace_seq_putc(s, ' ');
620 }
621
622 static void
623 print_graph_duration(struct trace_array *tr, unsigned long long duration,
624 struct trace_seq *s, u32 flags)
625 {
626 if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
627 !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
628 return;
629
630 /* No real data, just filling the column with spaces */
631 switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
632 case FLAGS_FILL_FULL:
633 trace_seq_puts(s, " | ");
634 return;
635 case FLAGS_FILL_START:
636 trace_seq_puts(s, " ");
637 return;
638 case FLAGS_FILL_END:
639 trace_seq_puts(s, " |");
640 return;
641 }
642
643 /* Signal an overhead of execution time to the output */
644 if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
645 trace_seq_printf(s, "%c ", trace_find_mark(duration));
646 else
647 trace_seq_puts(s, " ");
648
649 trace_print_graph_duration(duration, s);
650 trace_seq_puts(s, "| ");
651 }
652
653 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
654
655 #define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
656
657 static void print_graph_retval(struct trace_seq *s, unsigned long retval,
658 bool leaf, void *func, bool hex_format)
659 {
660 unsigned long err_code = 0;
661
662 if (retval == 0 || hex_format)
663 goto done;
664
665 /* Check if the return value matches the negative format */
666 if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
667 (((u64)retval) >> 32) == 0) {
668 /* sign extension */
669 err_code = (unsigned long)(s32)retval;
670 } else {
671 err_code = retval;
672 }
673
674 if (!IS_ERR_VALUE(err_code))
675 err_code = 0;
676
677 done:
678 if (leaf) {
679 if (hex_format || (err_code == 0))
680 trace_seq_printf(s, "%ps(); /* = 0x%lx */\n",
681 func, retval);
682 else
683 trace_seq_printf(s, "%ps(); /* = %ld */\n",
684 func, err_code);
685 } else {
686 if (hex_format || (err_code == 0))
687 trace_seq_printf(s, "} /* %ps = 0x%lx */\n",
688 func, retval);
689 else
690 trace_seq_printf(s, "} /* %ps = %ld */\n",
691 func, err_code);
692 }
693 }
694
695 #else
696
697 #define __TRACE_GRAPH_PRINT_RETVAL 0
698
699 #define print_graph_retval(_seq, _retval, _leaf, _func, _format) do {} while (0)
700
701 #endif
702
703 /* Case of a leaf function on its call entry */
704 static enum print_line_t
705 print_graph_entry_leaf(struct trace_iterator *iter,
706 struct ftrace_graph_ent_entry *entry,
707 struct ftrace_graph_ret_entry *ret_entry,
708 struct trace_seq *s, u32 flags)
709 {
710 struct fgraph_data *data = iter->private;
711 struct trace_array *tr = iter->tr;
712 struct ftrace_graph_ret *graph_ret;
713 struct ftrace_graph_ent *call;
714 unsigned long long duration;
715 unsigned long func;
716 int cpu = iter->cpu;
717 int i;
718
719 graph_ret = &ret_entry->ret;
720 call = &entry->graph_ent;
721 duration = graph_ret->rettime - graph_ret->calltime;
722
723 func = call->func + iter->tr->text_delta;
724
725 if (data) {
726 struct fgraph_cpu_data *cpu_data;
727
728 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
729
730 /*
731 * Comments display at + 1 to depth. Since
732 * this is a leaf function, keep the comments
733 * equal to this depth.
734 */
735 cpu_data->depth = call->depth - 1;
736
737 /* No need to keep this function around for this depth */
738 if (call->depth < FTRACE_RETFUNC_DEPTH &&
739 !WARN_ON_ONCE(call->depth < 0))
740 cpu_data->enter_funcs[call->depth] = 0;
741 }
742
743 /* Overhead and duration */
744 print_graph_duration(tr, duration, s, flags);
745
746 /* Function */
747 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
748 trace_seq_putc(s, ' ');
749
750 /*
751 * Write out the function return value if the funcgraph-retval option
752 * is enabled.
753 */
754 if (flags & __TRACE_GRAPH_PRINT_RETVAL)
755 print_graph_retval(s, graph_ret->retval, true, (void *)func,
756 !!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
757 else
758 trace_seq_printf(s, "%ps();\n", (void *)func);
759
760 print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
761 cpu, iter->ent->pid, flags);
762
763 return trace_handle_return(s);
764 }
765
766 static enum print_line_t
767 print_graph_entry_nested(struct trace_iterator *iter,
768 struct ftrace_graph_ent_entry *entry,
769 struct trace_seq *s, int cpu, u32 flags)
770 {
771 struct ftrace_graph_ent *call = &entry->graph_ent;
772 struct fgraph_data *data = iter->private;
773 struct trace_array *tr = iter->tr;
774 unsigned long func;
775 int i;
776
777 if (data) {
778 struct fgraph_cpu_data *cpu_data;
779 int cpu = iter->cpu;
780
781 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
782 cpu_data->depth = call->depth;
783
784 /* Save this function pointer to see if the exit matches */
785 if (call->depth < FTRACE_RETFUNC_DEPTH &&
786 !WARN_ON_ONCE(call->depth < 0))
787 cpu_data->enter_funcs[call->depth] = call->func;
788 }
789
790 /* No time */
791 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
792
793 /* Function */
794 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
795 trace_seq_putc(s, ' ');
796
797 func = call->func + iter->tr->text_delta;
798
799 trace_seq_printf(s, "%ps() {\n", (void *)func);
800
801 if (trace_seq_has_overflowed(s))
802 return TRACE_TYPE_PARTIAL_LINE;
803
804 /*
805 * we already consumed the current entry to check the next one
806 * and see if this is a leaf.
807 */
808 return TRACE_TYPE_NO_CONSUME;
809 }
810
811 static void
812 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
813 int type, unsigned long addr, u32 flags)
814 {
815 struct fgraph_data *data = iter->private;
816 struct trace_entry *ent = iter->ent;
817 struct trace_array *tr = iter->tr;
818 int cpu = iter->cpu;
819
820 /* Pid */
821 verif_pid(s, ent->pid, cpu, data);
822
823 if (type)
824 /* Interrupt */
825 print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
826
827 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
828 return;
829
830 /* Absolute time */
831 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
832 print_graph_abs_time(iter->ts, s);
833
834 /* Relative time */
835 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
836 print_graph_rel_time(iter, s);
837
838 /* Cpu */
839 if (flags & TRACE_GRAPH_PRINT_CPU)
840 print_graph_cpu(s, cpu);
841
842 /* Proc */
843 if (flags & TRACE_GRAPH_PRINT_PROC) {
844 print_graph_proc(s, ent->pid);
845 trace_seq_puts(s, " | ");
846 }
847
848 /* Latency format */
849 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
850 print_graph_lat_fmt(s, ent);
851
852 return;
853 }
854
855 /*
856 * Entry check for irq code
857 *
858 * returns 1 if
859 * - we are inside irq code
860 * - we just entered irq code
861 *
862 * returns 0 if
863 * - the funcgraph-irqs option is set
864 * - we are not inside irq code
865 */
866 static int
867 check_irq_entry(struct trace_iterator *iter, u32 flags,
868 unsigned long addr, int depth)
869 {
870 int cpu = iter->cpu;
871 int *depth_irq;
872 struct fgraph_data *data = iter->private;
873
874 addr += iter->tr->text_delta;
875
876 /*
877 * If we are either displaying irqs, or we got called as
878 * a graph event and private data does not exist,
879 * then we bypass the irq check.
880 */
881 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
882 (!data))
883 return 0;
884
885 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
886
887 /*
888 * We are inside the irq code
889 */
890 if (*depth_irq >= 0)
891 return 1;
892
893 if ((addr < (unsigned long)__irqentry_text_start) ||
894 (addr >= (unsigned long)__irqentry_text_end))
895 return 0;
896
897 /*
898 * We are entering irq code.
899 */
900 *depth_irq = depth;
901 return 1;
902 }
903
904 /*
905 * Return check for irq code
906 *
907 * returns 1 if
908 * - we are inside irq code
909 * - we just left irq code
910 *
911 * returns 0 if
912 * - the funcgraph-irqs option is set
913 * - we are not inside irq code
914 */
915 static int
916 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
917 {
918 int cpu = iter->cpu;
919 int *depth_irq;
920 struct fgraph_data *data = iter->private;
921
922 /*
923 * If we are either displaying irqs, or we got called as
924 * a graph event and private data does not exist,
925 * then we bypass the irq check.
926 */
927 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
928 (!data))
929 return 0;
930
931 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
932
933 /*
934 * We are not inside the irq code.
935 */
936 if (*depth_irq == -1)
937 return 0;
938
939 /*
940 * We are inside the irq code, and this is returning entry.
941 * Let's not trace it and clear the entry depth, since
942 * we are out of irq code.
943 *
944 * This condition ensures that we 'leave the irq code' once
945 * we are out of the entry depth. Thus protecting us from
946 * the RETURN entry loss.
947 */
948 if (*depth_irq >= depth) {
949 *depth_irq = -1;
950 return 1;
951 }
952
953 /*
954 * We are inside the irq code, and this is not the entry.
955 */
956 return 1;
957 }
958
959 static enum print_line_t
960 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
961 struct trace_iterator *iter, u32 flags)
962 {
963 struct fgraph_data *data = iter->private;
964 struct ftrace_graph_ent *call = &field->graph_ent;
965 struct ftrace_graph_ret_entry *leaf_ret;
966 static enum print_line_t ret;
967 int cpu = iter->cpu;
968
969 if (check_irq_entry(iter, flags, call->func, call->depth))
970 return TRACE_TYPE_HANDLED;
971
972 print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
973
974 leaf_ret = get_return_for_leaf(iter, field);
975 if (leaf_ret)
976 ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
977 else
978 ret = print_graph_entry_nested(iter, field, s, cpu, flags);
979
980 if (data) {
981 /*
982 * If we failed to write our output, then we need to make
983 * note of it, because we already consumed our entry.
984 */
985 if (s->full) {
986 data->failed = 1;
987 data->cpu = cpu;
988 } else
989 data->failed = 0;
990 }
991
992 return ret;
993 }
994
995 static enum print_line_t
996 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
997 struct trace_entry *ent, struct trace_iterator *iter,
998 u32 flags)
999 {
1000 unsigned long long duration = trace->rettime - trace->calltime;
1001 struct fgraph_data *data = iter->private;
1002 struct trace_array *tr = iter->tr;
1003 unsigned long func;
1004 pid_t pid = ent->pid;
1005 int cpu = iter->cpu;
1006 int func_match = 1;
1007 int i;
1008
1009 func = trace->func + iter->tr->text_delta;
1010
1011 if (check_irq_return(iter, flags, trace->depth))
1012 return TRACE_TYPE_HANDLED;
1013
1014 if (data) {
1015 struct fgraph_cpu_data *cpu_data;
1016 int cpu = iter->cpu;
1017
1018 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1019
1020 /*
1021 * Comments display at + 1 to depth. This is the
1022 * return from a function, we now want the comments
1023 * to display at the same level of the bracket.
1024 */
1025 cpu_data->depth = trace->depth - 1;
1026
1027 if (trace->depth < FTRACE_RETFUNC_DEPTH &&
1028 !WARN_ON_ONCE(trace->depth < 0)) {
1029 if (cpu_data->enter_funcs[trace->depth] != trace->func)
1030 func_match = 0;
1031 cpu_data->enter_funcs[trace->depth] = 0;
1032 }
1033 }
1034
1035 print_graph_prologue(iter, s, 0, 0, flags);
1036
1037 /* Overhead and duration */
1038 print_graph_duration(tr, duration, s, flags);
1039
1040 /* Closing brace */
1041 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1042 trace_seq_putc(s, ' ');
1043
1044 /*
1045 * Always write out the function name and its return value if the
1046 * funcgraph-retval option is enabled.
1047 */
1048 if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
1049 print_graph_retval(s, trace->retval, false, (void *)func,
1050 !!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX));
1051 } else {
1052 /*
1053 * If the return function does not have a matching entry,
1054 * then the entry was lost. Instead of just printing
1055 * the '}' and letting the user guess what function this
1056 * belongs to, write out the function name. Always do
1057 * that if the funcgraph-tail option is enabled.
1058 */
1059 if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1060 trace_seq_puts(s, "}\n");
1061 else
1062 trace_seq_printf(s, "} /* %ps */\n", (void *)func);
1063 }
1064
1065 /* Overrun */
1066 if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1067 trace_seq_printf(s, " (Overruns: %u)\n",
1068 trace->overrun);
1069
1070 print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1071 cpu, pid, flags);
1072
1073 return trace_handle_return(s);
1074 }
1075
1076 static enum print_line_t
1077 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1078 struct trace_iterator *iter, u32 flags)
1079 {
1080 struct trace_array *tr = iter->tr;
1081 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1082 struct fgraph_data *data = iter->private;
1083 struct trace_event *event;
1084 int depth = 0;
1085 int ret;
1086 int i;
1087
1088 if (data)
1089 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1090
1091 print_graph_prologue(iter, s, 0, 0, flags);
1092
1093 /* No time */
1094 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1095
1096 /* Indentation */
1097 if (depth > 0)
1098 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1099 trace_seq_putc(s, ' ');
1100
1101 /* The comment */
1102 trace_seq_puts(s, "/* ");
1103
1104 switch (iter->ent->type) {
1105 case TRACE_BPUTS:
1106 ret = trace_print_bputs_msg_only(iter);
1107 if (ret != TRACE_TYPE_HANDLED)
1108 return ret;
1109 break;
1110 case TRACE_BPRINT:
1111 ret = trace_print_bprintk_msg_only(iter);
1112 if (ret != TRACE_TYPE_HANDLED)
1113 return ret;
1114 break;
1115 case TRACE_PRINT:
1116 ret = trace_print_printk_msg_only(iter);
1117 if (ret != TRACE_TYPE_HANDLED)
1118 return ret;
1119 break;
1120 default:
1121 event = ftrace_find_event(ent->type);
1122 if (!event)
1123 return TRACE_TYPE_UNHANDLED;
1124
1125 ret = event->funcs->trace(iter, sym_flags, event);
1126 if (ret != TRACE_TYPE_HANDLED)
1127 return ret;
1128 }
1129
1130 if (trace_seq_has_overflowed(s))
1131 goto out;
1132
1133 /* Strip ending newline */
1134 if (s->buffer[s->seq.len - 1] == '\n') {
1135 s->buffer[s->seq.len - 1] = '\0';
1136 s->seq.len--;
1137 }
1138
1139 trace_seq_puts(s, " */\n");
1140 out:
1141 return trace_handle_return(s);
1142 }
1143
1144
1145 enum print_line_t
1146 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1147 {
1148 struct ftrace_graph_ent_entry *field;
1149 struct fgraph_data *data = iter->private;
1150 struct trace_entry *entry = iter->ent;
1151 struct trace_seq *s = &iter->seq;
1152 int cpu = iter->cpu;
1153 int ret;
1154
1155 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1156 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1157 return TRACE_TYPE_HANDLED;
1158 }
1159
1160 /*
1161 * If the last output failed, there's a possibility we need
1162 * to print out the missing entry which would never go out.
1163 */
1164 if (data && data->failed) {
1165 field = &data->ent;
1166 iter->cpu = data->cpu;
1167 ret = print_graph_entry(field, s, iter, flags);
1168 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1169 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1170 ret = TRACE_TYPE_NO_CONSUME;
1171 }
1172 iter->cpu = cpu;
1173 return ret;
1174 }
1175
1176 switch (entry->type) {
1177 case TRACE_GRAPH_ENT: {
1178 /*
1179 * print_graph_entry() may consume the current event,
1180 * thus @field may become invalid, so we need to save it.
1181 * sizeof(struct ftrace_graph_ent_entry) is very small,
1182 * it can be safely saved at the stack.
1183 */
1184 struct ftrace_graph_ent_entry saved;
1185 trace_assign_type(field, entry);
1186 saved = *field;
1187 return print_graph_entry(&saved, s, iter, flags);
1188 }
1189 case TRACE_GRAPH_RET: {
1190 struct ftrace_graph_ret_entry *field;
1191 trace_assign_type(field, entry);
1192 return print_graph_return(&field->ret, s, entry, iter, flags);
1193 }
1194 case TRACE_STACK:
1195 case TRACE_FN:
1196 /* don't trace stack and functions as comments */
1197 return TRACE_TYPE_UNHANDLED;
1198
1199 default:
1200 return print_graph_comment(s, entry, iter, flags);
1201 }
1202
1203 return TRACE_TYPE_HANDLED;
1204 }
1205
1206 static enum print_line_t
1207 print_graph_function(struct trace_iterator *iter)
1208 {
1209 return print_graph_function_flags(iter, tracer_flags.val);
1210 }
1211
1212 static enum print_line_t
1213 print_graph_function_event(struct trace_iterator *iter, int flags,
1214 struct trace_event *event)
1215 {
1216 return print_graph_function(iter);
1217 }
1218
1219 static void print_lat_header(struct seq_file *s, u32 flags)
1220 {
1221 static const char spaces[] = "                " /* 16 spaces */
1222 "    " /* 4 spaces */
1223 "                 "; /* 17 spaces */
1224 int size = 0;
1225
1226 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1227 size += 16;
1228 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1229 size += 16;
1230 if (flags & TRACE_GRAPH_PRINT_CPU)
1231 size += 4;
1232 if (flags & TRACE_GRAPH_PRINT_PROC)
1233 size += 17;
1234
1235 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
1236 seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
1237 seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1238 seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
1239 seq_printf(s, "#%.*s||| / \n", size, spaces);
1240 }
1241
1242 static void __print_graph_headers_flags(struct trace_array *tr,
1243 struct seq_file *s, u32 flags)
1244 {
1245 int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1246
1247 if (lat)
1248 print_lat_header(s, flags);
1249
1250 /* 1st line */
1251 seq_putc(s, '#');
1252 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1253 seq_puts(s, " TIME ");
1254 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1255 seq_puts(s, " REL TIME ");
1256 if (flags & TRACE_GRAPH_PRINT_CPU)
1257 seq_puts(s, " CPU");
1258 if (flags & TRACE_GRAPH_PRINT_PROC)
1259 seq_puts(s, " TASK/PID ");
1260 if (lat)
1261 seq_puts(s, "|||| ");
1262 if (flags & TRACE_GRAPH_PRINT_DURATION)
1263 seq_puts(s, " DURATION ");
1264 seq_puts(s, " FUNCTION CALLS\n");
1265
1266 /* 2nd line */
1267 seq_putc(s, '#');
1268 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1269 seq_puts(s, " | ");
1270 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1271 seq_puts(s, " | ");
1272 if (flags & TRACE_GRAPH_PRINT_CPU)
1273 seq_puts(s, " | ");
1274 if (flags & TRACE_GRAPH_PRINT_PROC)
1275 seq_puts(s, " | | ");
1276 if (lat)
1277 seq_puts(s, "|||| ");
1278 if (flags & TRACE_GRAPH_PRINT_DURATION)
1279 seq_puts(s, " | | ");
1280 seq_puts(s, " | | | |\n");
1281 }
1282
1283 static void print_graph_headers(struct seq_file *s)
1284 {
1285 print_graph_headers_flags(s, tracer_flags.val);
1286 }
1287
1288 void print_graph_headers_flags(struct seq_file *s, u32 flags)
1289 {
1290 struct trace_iterator *iter = s->private;
1291 struct trace_array *tr = iter->tr;
1292
1293 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1294 return;
1295
1296 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1297 /* print nothing if the buffers are empty */
1298 if (trace_empty(iter))
1299 return;
1300
1301 print_trace_header(s, iter);
1302 }
1303
1304 __print_graph_headers_flags(tr, s, flags);
1305 }
1306
1307 void graph_trace_open(struct trace_iterator *iter)
1308 {
1309 /* pid and depth on the last trace processed */
1310 struct fgraph_data *data;
1311 gfp_t gfpflags;
1312 int cpu;
1313
1314 iter->private = NULL;
1315
1316 /* We can be called in atomic context via ftrace_dump() */
1317 gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1318
1319 data = kzalloc(sizeof(*data), gfpflags);
1320 if (!data)
1321 goto out_err;
1322
1323 data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1324 if (!data->cpu_data)
1325 goto out_err_free;
1326
1327 for_each_possible_cpu(cpu) {
1328 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1329 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1330 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1331 int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1332
1333 *pid = -1;
1334 *depth = 0;
1335 *ignore = 0;
1336 *depth_irq = -1;
1337 }
1338
1339 iter->private = data;
1340
1341 return;
1342
1343 out_err_free:
1344 kfree(data);
1345 out_err:
1346 pr_warn("function graph tracer: not enough memory\n");
1347 }
1348
1349 void graph_trace_close(struct trace_iterator *iter)
1350 {
1351 struct fgraph_data *data = iter->private;
1352
1353 if (data) {
1354 free_percpu(data->cpu_data);
1355 kfree(data);
1356 }
1357 }
1358
1359 static int
1360 func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1361 {
1362 if (bit == TRACE_GRAPH_PRINT_IRQS)
1363 ftrace_graph_skip_irqs = !set;
1364
1365 if (bit == TRACE_GRAPH_SLEEP_TIME)
1366 ftrace_graph_sleep_time_control(set);
1367
1368 if (bit == TRACE_GRAPH_GRAPH_TIME)
1369 ftrace_graph_graph_time_control(set);
1370
1371 return 0;
1372 }
1373
1374 static struct trace_event_functions graph_functions = {
1375 .trace = print_graph_function_event,
1376 };
1377
1378 static struct trace_event graph_trace_entry_event = {
1379 .type = TRACE_GRAPH_ENT,
1380 .funcs = &graph_functions,
1381 };
1382
1383 static struct trace_event graph_trace_ret_event = {
1384 .type = TRACE_GRAPH_RET,
1385 .funcs = &graph_functions
1386 };
1387
1388 static struct tracer graph_trace __tracer_data = {
1389 .name = "function_graph",
1390 .update_thresh = graph_trace_update_thresh,
1391 .open = graph_trace_open,
1392 .pipe_open = graph_trace_open,
1393 .close = graph_trace_close,
1394 .pipe_close = graph_trace_close,
1395 .init = graph_trace_init,
1396 .reset = graph_trace_reset,
1397 .print_line = print_graph_function,
1398 .print_header = print_graph_headers,
1399 .flags = &tracer_flags,
1400 .set_flag = func_graph_set_flag,
1401 .allow_instances = true,
1402 #ifdef CONFIG_FTRACE_SELFTEST
1403 .selftest = trace_selftest_startup_function_graph,
1404 #endif
1405 };
1406
1407
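/*
 * "max_graph_depth" tracefs file: writing a value caps how deep the
 * function graph tracer follows calls; writing 0 removes the limit.
 */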
1408 static ssize_t
1409 graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1410 loff_t *ppos)
1411 {
1412 unsigned long val;
1413 int ret;
1414
1415 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1416 if (ret)
1417 return ret;
1418
1419 fgraph_max_depth = val;
1420
1421 *ppos += cnt;
1422
1423 return cnt;
1424 }
1425
1426 static ssize_t
1427 graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1428 loff_t *ppos)
1429 {
1430 char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1431 int n;
1432
1433 n = sprintf(buf, "%d\n", fgraph_max_depth);
1434
1435 return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1436 }
1437
1438 static const struct file_operations graph_depth_fops = {
1439 .open = tracing_open_generic,
1440 .write = graph_depth_write,
1441 .read = graph_depth_read,
1442 .llseek = generic_file_llseek,
1443 };
1444
1445 static __init int init_graph_tracefs(void)
1446 {
1447 int ret;
1448
1449 ret = tracing_init_dentry();
1450 if (ret)
1451 return 0;
1452
1453 trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
1454 NULL, &graph_depth_fops);
1455
1456 return 0;
1457 }
1458 fs_initcall(init_graph_tracefs);
1459
1460 static __init int init_graph_trace(void)
1461 {
1462 max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1463
1464 if (!register_trace_event(&graph_trace_entry_event)) {
1465 pr_warn("Warning: could not register graph trace events\n");
1466 return 1;
1467 }
1468
1469 if (!register_trace_event(&graph_trace_ret_event)) {
1470 pr_warn("Warning: could not register graph trace events\n");
1471 return 1;
1472 }
1473
1474 return register_tracer(&graph_trace);
1475 }
1476
1477 core_initcall(init_graph_trace);
1478