xref: /linux/kernel/trace/trace_functions.c (revision 5f7fb89a115d53b4a10bf7ba2733e78df281e98d)
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our options */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to the next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

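/*
 * Allocate the ftrace_ops used by a trace instance. The top level
 * trace_array uses the boot-time "global_ops", so nothing needs to be
 * allocated for it.
 */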
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}

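/*
 * Map the current tracer option flags to the matching trace callback.
 * Returns NULL for an unsupported combination.
 */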
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

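/*
 * Make sure the per-CPU last_func_repeats buffer exists when the
 * "no repeats" option is requested. Returns false on allocation failure.
 */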
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

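/* Plain function tracing callback: record one function entry event per call. */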
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

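/*
 * Like function_trace_call(), but also record a stack trace with each
 * function entry event.
 */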
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

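/*
 * Return true if this call repeats the previously traced ip/parent_ip
 * pair; in that case only the repeat count and timestamp are updated.
 */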
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

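/*
 * Flush any pending repeat event for the previous ip/parent_ip pair and
 * start tracking the new one.
 */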
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

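/*
 * Function tracing callback that suppresses consecutive calls to the same
 * ip/parent_ip pair, emitting a single repeat-count event instead.
 */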
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any point here. As far as I can see,
	 * the only damage it can cause is a messed up repetition counter;
	 * no valuable data is lost.
	 * TODO: think about a solution that is better than just hoping to
	 * be lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

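/*
 * Combination of the stack tracing and "no repeats" callbacks above.
 */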
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

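/*
 * Hook tr->ops into ftrace and mark function tracing as enabled for this
 * instance; tracing_stop_function_trace() below undoes both.
 */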
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

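/*
 * Handle runtime updates of the func_stack_trace and func-no-repeats
 * tracer options: pick the matching callback and re-register tr->ops
 * if the callback changed.
 */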
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) a second time does nothing: the tracer is
	 * already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or
	 * enabled), then a write memory barrier is used to make sure
	 * that the new state is visible before the counter is updated
	 * to one less than the old count. This guarantees that another
	 * CPU executing this code will see the new state before seeing
	 * the new counter value, and will not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

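/* Dump every CPU ring buffer to the console when the probed function is hit. */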
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}


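/*
 * init/free callbacks shared by the counted probes: maintain the per-ip
 * count used by the probes that accept a ":count" limit.
 */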
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

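/*
 * Common handler for the traceon/traceoff/stacktrace/dump/cpudump probe
 * commands written to the set_ftrace_filter file. Parses the "!" prefix
 * for removal and the optional ":count" parameter, e.g. (illustrative
 * usage, assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 'schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *   echo 'schedule:stacktrace:5' > /sys/kernel/tracing/set_ftrace_filter
 */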
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}