// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our options */
enum {
	TRACE_FUNC_NO_OPTS = 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK = 0x1,
	TRACE_FUNC_OPT_NO_REPEATS = 0x2,

	/* Update this to the next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT = 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}

static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

/* fregs are guaranteed not to be NULL if HAVE_DYNAMIC_FTRACE_WITH_ARGS is set */
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	unsigned long true_parent_ip;
	int idx = 0;

	true_parent_ip = parent_ip;
	if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
		true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
				(unsigned long *)ftrace_regs_get_stack_pointer(fregs));
	return true_parent_ip;
}
#else
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	return parent_ip;
}
#endif

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

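	/* Bail out if this callback recurses into itself via a traced function */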
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);

	trace_ctx = tracing_gen_ctx();

	data = this_cpu_ptr(tr->array_buffer.data);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

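/*
 * Check whether this call repeats the previously recorded ip/parent_ip
 * pair. If so, only bump the repeat count and refresh the timestamp of
 * the last call; once the count reaches U16_MAX the check fails, so the
 * accumulated repeats get flushed as a regular event.
 */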
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

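/*
 * Like function_trace_call(), but consecutive calls with an identical
 * ip/parent_ip pair are folded into a single "repeats" event to save
 * ring buffer space.
 */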
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	data = this_cpu_ptr(tr->array_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = this_cpu_ptr(tr->last_func_repeats);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

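/*
 * These bits are exposed as boolean files under the tracefs "options"
 * directory while the function tracer is active, e.g. (illustrative
 * paths; tracefs is typically mounted at /sys/kernel/tracing):
 *
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *   echo 1 > /sys/kernel/tracing/options/func-no-repeats
 */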
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

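/* Invoked when one of the tracer option bits above is flipped via tracefs. */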
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

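/*
 * The tracer itself; selected at run time with, e.g.:
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 */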
static struct tracer function_trace __tracer_data =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or
	 * enabled), then a write memory barrier is used to make sure
	 * that the new state is visible before the counter is updated
	 * to one less than its old value. This guarantees that another
	 * CPU executing this code will see the new state before seeing
	 * the new counter value, and will return early (doing nothing)
	 * once it observes that state.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

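/*
 * Shared probe init: allocate (on first use) the mapper that associates
 * each traced ip with its remaining trigger count.
 */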
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};

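/*
 * Common parsing for the function trigger commands below. The text
 * written to set_ftrace_filter has the form (see
 * Documentation/trace/ftrace.rst):
 *
 *   <function>:<command>[:count]
 *
 * A leading '!' unregisters the probe; an optional count limits how
 * many times it fires.
 */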
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

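/*
 * "traceon"/"traceoff": turn the trace buffer on or off when the marked
 * function is hit, e.g. (illustrative):
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 */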
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

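/*
 * "stacktrace": record a stack trace each time the marked function is
 * hit, e.g. (illustrative):
 *
 *   echo 'kfree:stacktrace:10' > set_ftrace_filter
 */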
static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

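/*
 * "dump": dump the contents of the trace buffers to the console when the
 * marked function is hit (forced to a one-shot probe below).
 */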
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

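/*
 * "cpudump": like "dump", but only the buffer of the CPU that hit the
 * function is dumped.
 */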
static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}