trace_functions.c: diff from 666fab4a3ea143315a9c059fad9f3a0f1365d54b (old) to d19ad0775dcd64b49eecf4fa79c17959ebfbd26b (new)
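Read as a whole, the visible hunks make three changes: the ftrace callback prototype now takes a `struct ftrace_regs *` instead of a `struct pt_regs *`, the allocated ops no longer advertises `FTRACE_OPS_FL_RECURSION_SAFE`, and the hand-rolled recursion guard in `function_trace_call()` becomes the `ftrace_test_recursion_trylock()`/`ftrace_test_recursion_unlock()` helpers. The prototype change tracks the `ftrace_func_t` typedef in the ftrace headers; as a hedged sketch (the typedef itself is not shown in this file, so its exact shape here is an assumption inferred from the declarations below):

```diff
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
-			      struct ftrace_ops *op, struct pt_regs *regs);
+			      struct ftrace_ops *op, struct ftrace_regs *fregs);
```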
```diff
 // SPDX-License-Identifier: GPL-2.0
 /*
  * ring buffer based function tracer
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  *
  * Based on code from the latency_tracer, that is:

 [... 9 unchanged lines hidden ...]

 #include <linux/fs.h>

 #include "trace.h"

 static void tracing_start_function_trace(struct trace_array *tr);
 static void tracing_stop_function_trace(struct trace_array *tr);
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
-		    struct ftrace_ops *op, struct pt_regs *pt_regs);
+		    struct ftrace_ops *op, struct ftrace_regs *fregs);
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
-			  struct ftrace_ops *op, struct pt_regs *pt_regs);
+			  struct ftrace_ops *op, struct ftrace_regs *fregs);
```
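Both forward declarations move from the old callback prototype, which received a raw `struct pt_regs *`, to the new one that receives a `struct ftrace_regs *`. As a rough illustration of what a callback can do with the new argument (this sketch is not from this file; the callback name is invented, and the assumption is that `ftrace_get_regs()` only yields a full `pt_regs` when the ops was registered with `FTRACE_OPS_FL_SAVE_REGS`):

```c
#include <linux/ftrace.h>
#include <linux/ptrace.h>
#include <linux/printk.h>

/* Hypothetical callback showing the new prototype; not part of trace_functions.c. */
static void my_func_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Assumed behavior: NULL unless the ops set FTRACE_OPS_FL_SAVE_REGS. */
	struct pt_regs *regs = ftrace_get_regs(fregs);

	/*
	 * A real callback must also worry about recursion (printing from a
	 * function-trace callback can itself be traced); see the trylock
	 * helpers further down.
	 */
	if (regs)
		pr_debug("traced %ps (pc=%lx), called from %ps\n",
			 (void *)ip, instruction_pointer(regs), (void *)parent_ip);
}
```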
```diff
 static struct tracer_flags func_flags;

 /* Our option */
 enum {
 	TRACE_FUNC_OPT_STACK = 0x1,
 };

 int ftrace_allocate_ftrace_ops(struct trace_array *tr)

 [... 5 unchanged lines hidden ...]

 		return 0;

 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 	if (!ops)
 		return -ENOMEM;

 	/* Currently only the non stack version is supported */
 	ops->func = function_trace_call;
-	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
+	ops->flags = FTRACE_OPS_FL_PID;
```
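The allocated ops drops `FTRACE_OPS_FL_RECURSION_SAFE` and keeps only `FTRACE_OPS_FL_PID`. In the newer tree the sense of the recursion flag appears to be inverted: an ops whose callback guards itself (as this one now does with the trylock/unlock pair further down) sets no recursion flag at all, while an ops that wants the ftrace core to add the protection must ask for it explicitly. A minimal sketch of that opposite case, with an invented ops and callback name, assuming `FTRACE_OPS_FL_RECURSION` requests core-side protection:

```c
#include <linux/ftrace.h>

/* Hypothetical callback that does no recursion handling of its own. */
static void my_lazy_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* tracing work would go here */
}

static struct ftrace_ops my_lazy_ops = {
	.func	= my_lazy_callback,
	.flags	= FTRACE_OPS_FL_RECURSION,	/* assumption: ask the core to wrap the call */
};

/* e.g. from a module init function: register_ftrace_function(&my_lazy_ops); */
```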
```diff

 	tr->ops = ops;
 	ops->private = tr;

 	return 0;
 }

 void ftrace_free_ftrace_ops(struct trace_array *tr)

 [... 24 unchanged lines hidden ...]

 {
 	ftrace_destroy_filter_files(tr->ops);
 	ftrace_free_ftrace_ops(tr);
 }

 static int function_trace_init(struct trace_array *tr)
 {
 	ftrace_func_t func;
-
 	/*
 	 * Instance trace_arrays get their ops allocated
 	 * at instance creation. Unless it failed
 	 * the allocation.
 	 */
 	if (!tr->ops)
 		return -ENOMEM;

 [... 23 unchanged lines hidden ...]

 static void function_trace_start(struct trace_array *tr)
 {
 	tracing_reset_online_cpus(&tr->array_buffer);
 }

 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
-		    struct ftrace_ops *op, struct pt_regs *pt_regs)
+		    struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
 	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	int bit;
 	int cpu;
 	int pc;

 	if (unlikely(!tr->function_enabled))
 		return;

+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+	if (bit < 0)
+		return;
+
 	pc = preempt_count();
 	preempt_disable_notrace();

-	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
-	if (bit < 0)
-		goto out;
-
 	cpu = smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	if (!atomic_read(&data->disabled)) {
 		local_save_flags(flags);
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
-	trace_clear_recursion(bit);
-
- out:
+	ftrace_test_recursion_unlock(bit);
 	preempt_enable_notrace();
 }
```
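Inside `function_trace_call()` the open-coded `trace_test_and_set_recursion()` / `trace_clear_recursion()` pair and the `out:` label are replaced by the `ftrace_test_recursion_trylock()` / `ftrace_test_recursion_unlock()` helpers, with the lock taken before preemption is disabled. A minimal sketch of the resulting pattern outside of this tracer (the callback name is invented and the body is a placeholder):

```c
#include <linux/ftrace.h>
#include <linux/preempt.h>

static void my_guarded_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	/* Negative return means this context is already inside the callback; bail out. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	preempt_disable_notrace();
	/* ... the actual tracing work would go here ... */
	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}
```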
```diff

 #ifdef CONFIG_UNWINDER_ORC
 /*
  * Skip 2:
  *
  * function_stack_trace_call()

 [... 7 unchanged lines hidden ...]

  * function_stack_trace_call()
  * ftrace_call()
  */
 #define STACK_SKIP 3
 #endif

 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
-			  struct ftrace_ops *op, struct pt_regs *pt_regs)
+			  struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
 	struct trace_array *tr = op->private;
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
 	int cpu;
 	int pc;

 [... 629 unchanged lines hidden ...]
```