// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {

	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

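/*
 * Mask covering every option bit defined above (TRACE_FUNC_OPT_HIGHEST_BIT - 1,
 * currently 0x3). select_trace_function() only looks at these bits.
 */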
#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}

static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

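/*
 * Lazily allocate the per-CPU state used to fold repeated calls to the same
 * function into a single "last repeats" event when the no-repeats option is
 * requested.
 */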
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

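/*
 * When the function graph tracer is active, the return address seen here may
 * have been replaced with the return_to_handler trampoline. In that case,
 * look up the real caller on the saved return stack via
 * ftrace_graph_ret_addr() so the trace shows the true parent.
 */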
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	unsigned long true_parent_ip;
	int idx = 0;

	true_parent_ip = parent_ip;
	if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
		true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
				(unsigned long *)ftrace_regs_get_stack_pointer(fregs));
	return true_parent_ip;
}
#else
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	return parent_ip;
}
#endif

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

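	/*
	 * Guard against recursion: if a function called from this callback
	 * is itself traced, the trylock fails and the nested event is
	 * dropped instead of recursing.
	 */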
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);

	trace_ctx = tracing_gen_ctx();

	data = this_cpu_ptr(tr->array_buffer.data);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

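	/*
	 * Only record when this is the outermost entry on this CPU;
	 * a nested entry (disabled > 1) means we interrupted ourselves
	 * while already recording an event.
	 */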
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	data = this_cpu_ptr(tr->array_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any point here, but as far as I can
	 * see the only damage it can cause is to mess up the repetition
	 * counter; no valuable data is lost.
	 * TODO: think about a solution that is better than just hoping to
	 * be lucky.
	 */
	last_info = this_cpu_ptr(tr->last_func_repeats);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

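	/*
	 * Swap in the new callback while the ops is unregistered; the ops
	 * is never modified while it is live, so other CPUs only ever see
	 * a fully consistent ops.
	 */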
	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled),
	 * then a write memory barrier is used to make sure that the new
	 * state is visible before the counter is updated to one less than
	 * the old count. This guarantees that another CPU executing this
	 * code will see the new state before seeing the new counter value,
	 * and will not do anything if it already sees the new counter.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
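	/*
	 * The decrement below is lock-free: several CPUs may race here, but
	 * only the one whose cmpxchg() succeeds in moving the count down by
	 * one records a stack trace for that slot.
	 */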
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}


static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

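	/*
	 * A zero ip means the whole probe is going away, so release the
	 * entire mapper; otherwise drop only this ip's entry.
	 */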
	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}