// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_args_trace_call(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our options */
enum {

	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
	TRACE_FUNC_OPT_ARGS		= 0x4,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x8
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}

/* Map the current set of option flags to the matching tracer callback. */
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_ARGS:
		return function_args_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

/*
 * Make sure the per CPU state used to detect repeated calls is
 * allocated when the "no-repeats" option is requested.
 */
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

/* fregs are guaranteed not to be NULL if HAVE_DYNAMIC_FTRACE_WITH_ARGS is set */
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	unsigned long true_parent_ip;
	int idx = 0;

	true_parent_ip = parent_ip;
	if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
		true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
				(unsigned long *)ftrace_regs_get_stack_pointer(fregs));
	return true_parent_ip;
}
#else
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	return parent_ip;
}
#endif

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);

	trace_ctx = tracing_gen_ctx_dec();

	data = this_cpu_ptr(tr->array_buffer.data);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx, NULL);

	ftrace_test_recursion_unlock(bit);
}

static void
function_args_trace_call(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx, fregs);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	data = this_cpu_ptr(tr->array_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = this_cpu_ptr(tr->last_func_repeats);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	trace_ctx = tracing_gen_ctx_dec();
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx, NULL);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx, NULL);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
#ifdef CONFIG_FUNCTION_TRACE_ARGS
	{ TRACER_OPT(func-args, TRACE_FUNC_OPT_ARGS) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};
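
/*
 * Example (an illustration, assuming tracefs is mounted at
 * /sys/kernel/tracing): with "function" as the current tracer, the
 * options declared above appear as files named after the TRACER_OPT()
 * entries, e.g.:
 *
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *   echo 1 > /sys/kernel/tracing/options/func-no-repeats
 */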

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
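
/*
 * Example (an illustration, assuming the usual tracefs mount point):
 * the "function" tracer defined above is selected with
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *
 * and, since .allow_instances is set, it can also be used from an
 * instance created under /sys/kernel/tracing/instances/.
 */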

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before the counter is updated to
	 * the old count minus one. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
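
/*
 * Worked example (illustrative sketch only): two CPUs hit a "traceoff:2"
 * probe at the same time while tracing is on. Both may read old_count == 2,
 * both may turn tracing off (the second call is a nop), and both store
 * 2 - 1 == 1, so the count is decremented exactly once for this transition.
 * If a third CPU then reads the new count (1), the smp_wmb()/smp_rmb()
 * pairing guarantees it also sees tracing already off, so it returns at
 * the state check above without decrementing the count again.
 */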

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}


static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
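
/*
 * Example (assuming tracefs is mounted at /sys/kernel/tracing; see
 * Documentation/trace/ftrace.rst): the commands registered below are
 * used through set_ftrace_filter with a "function:command[:count]"
 * pattern, and a leading '!' removes an installed probe (the
 * glob[0] == '!' case handled above), e.g.:
 *
 *   echo 'schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *   echo 'kfree:stacktrace:3' > /sys/kernel/tracing/set_ftrace_filter
 *   echo '!schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 */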

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}