// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our options */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
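
/*
 * TRACE_FUNC_OPT_MASK covers every bit below the highest one: with
 * TRACE_FUNC_OPT_HIGHEST_BIT == 0x4 it works out to 0x3, i.e. exactly
 * the two option bits defined above.
 */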

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}

static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}
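
/*
 * Since select_trace_function() masks flags_val with TRACE_FUNC_OPT_MASK,
 * all four reachable option combinations are handled by explicit cases;
 * the NULL default is purely defensive.
 */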

static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}
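
/*
 * Note that handle_func_repeats() allocates the per-CPU repeat-tracking
 * state lazily, the first time the no-repeats option is requested, and
 * the state is then kept for the lifetime of the trace_array rather than
 * being freed when the option is cleared.
 */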

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	data = this_cpu_ptr(tr->array_buffer.data);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
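
/*
 * Unlike function_trace_call(), the stack variant above uses the per-CPU
 * data->disabled counter (incremented with interrupts disabled) as its
 * recursion guard: only the outermost entry on a CPU, which sees the
 * counter at 1, records the function and its stack.
 */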

static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}
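
/*
 * Together, these two helpers collapse consecutive events with the same
 * ip/parent_ip pair into one function entry followed by a single
 * func_repeats event that carries the count, e.g. (illustrative output):
 *
 *   _raw_spin_unlock_irqrestore <-hrtimer_try_to_cancel
 *   _raw_spin_unlock_irqrestore <-hrtimer_try_to_cancel (repeats: 659)
 */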

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	data = this_cpu_ptr(tr->array_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = this_cpu_ptr(tr->last_func_repeats);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};
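
/*
 * These options appear in the tracefs options directory while the
 * function tracer is selected, e.g. (illustrative, assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *   # echo 1 > /sys/kernel/tracing/options/func-no-repeats
 */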

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* Nothing else to do when the function tracer is not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}
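
/*
 * Note the ordering at the end of func_set_flag(): tr->ops is
 * unregistered before its ->func pointer is swapped and only then
 * registered again, so a registered ops never has its callback changed
 * underneath it.
 */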

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before the counter is updated to
	 * the old count minus one. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
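
/*
 * The callbacks below are reached through probe commands written to
 * set_ftrace_filter, e.g. (illustrative):
 *
 *   # echo 'schedule:traceoff' > set_ftrace_filter
 *   # echo 'schedule:stacktrace:5' > set_ftrace_filter
 *   # echo '!schedule:traceoff' >> set_ftrace_filter
 *
 * An optional ":count" limits how many times the probe fires; prefixing
 * the glob with '!' removes a previously registered probe.
 */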

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}