--- ftrace.c (e5c9a223da3c5bb9563ecf4ebe0c32de39dbc620)
+++ ftrace.c (e5a971d76d701dbff9e5dbaa84dc9e8c3081a867)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Infrastructure for profiling code inserted by 'gcc -pg'.
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
  *
  * Originally ported from the -rt patch by:

@@ -155,27 +155,16 @@
 		if (pid != FTRACE_PID_TRACE &&
 		    pid != current->pid)
 			return;
 	}
 
 	op->saved_func(ip, parent_ip, op, regs);
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-	/*
-	 * This function is just a stub to implement a hard force
-	 * of synchronize_rcu(). This requires synchronizing
-	 * tasks even in userspace and idle.
-	 *
-	 * Yes, function tracing is rude.
-	 */
-}
-
 static void ftrace_sync_ipi(void *data)
 {
 	/* Probably not needed, but do it anyway */
 	smp_rmb();
 }
 
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
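Not part of the diff, but for context: the deleted ftrace_sync() stub existed only so it could be handed to schedule_on_each_cpu(), which queues a work item on every CPU and waits for each to run it; forcing every CPU through the scheduler was the heavyweight barrier ftrace relied on. A minimal sketch of that trick next to its one-line replacement follows (the demo_* names are made up for illustration; schedule_on_each_cpu() and synchronize_rcu_tasks_rude() are the real kernel APIs):

#include <linux/workqueue.h>
#include <linux/rcupdate.h>

/* Deliberately empty: only the forced context switch on each CPU matters. */
static void demo_sync_stub(struct work_struct *work)
{
}

static void demo_force_global_sync(void)
{
	/* Old approach: run a no-op work item on every CPU and wait. */
	schedule_on_each_cpu(demo_sync_stub);

	/*
	 * Replacement used throughout this diff: a single call that waits
	 * for every CPU, including CPUs that are idle or in userspace.
	 */
	synchronize_rcu_tasks_rude();
}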

@@ -251,17 +240,17 @@
 	 * function we want, albeit indirectly, but it handles the
 	 * ftrace_ops and doesn't depend on function_trace_op.
 	 */
 	ftrace_trace_function = ftrace_ops_list_func;
 	/*
 	 * Make sure all CPUs see this. Yes this is slow, but static
 	 * tracing is slow and nasty to have enabled.
 	 */
-	schedule_on_each_cpu(ftrace_sync);
+	synchronize_rcu_tasks_rude();
 	/* Now all cpus are using the list ops. */
 	function_trace_op = set_function_trace_op;
 	/* Make sure the function_trace_op is visible on all CPUs */
 	smp_wmb();
 	/* Nasty way to force a rmb on all cpus */
 	smp_call_function(ftrace_sync_ipi, NULL, 1);
 	/* OK, we are all set to update the ftrace_trace_function now! */
 #endif /* !CONFIG_DYNAMIC_FTRACE */
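The hunk above is the !CONFIG_DYNAMIC_FTRACE update path: publish the list callback, force a full-system synchronization, publish the matching ops pointer behind a write barrier, then interrupt every CPU so each executes a read barrier before the final callback switch. A rough sketch of that publish-then-IPI ordering pattern, using made-up demo_* names (smp_wmb(), smp_rmb() and smp_call_function() are the real primitives):

#include <linux/smp.h>
#include <asm/barrier.h>

static void *demo_published_op;

static void demo_ipi_rmb(void *data)
{
	/* Pairs with the smp_wmb() done by the updater. */
	smp_rmb();
}

static void demo_publish_op(void *new_op)
{
	demo_published_op = new_op;
	/* Order the store before any CPU is asked to re-read it. */
	smp_wmb();
	/* Run demo_ipi_rmb() on all other CPUs and wait for completion. */
	smp_call_function(demo_ipi_rmb, NULL, 1);
}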

@@ -2927,17 +2916,17 @@
 	/*
 	 * We need to do a hard force of sched synchronization.
 	 * This is because we use preempt_disable() to do RCU, but
 	 * the function tracers can be called where RCU is not watching
 	 * (like before user_exit()). We can not rely on the RCU
 	 * infrastructure to do the synchronization, thus we must do it
 	 * ourselves.
 	 */
-	schedule_on_each_cpu(ftrace_sync);
+	synchronize_rcu_tasks_rude();
 
 	/*
 	 * When the kernel is preemptive, tasks can be preempted
 	 * while on a ftrace trampoline. Just scheduling a task on
 	 * a CPU is not good enough to flush them. Calling
 	 * synchronize_rcu_tasks() will wait for those tasks to
 	 * execute and either schedule voluntarily or enter user space.
 	 */
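Taken together, the two comments above describe a two-stage wait before trampoline memory can be reused: synchronize_rcu_tasks_rude() covers CPUs calling the tracer with preemption disabled where RCU is not watching, and synchronize_rcu_tasks() covers tasks preempted while executing on the trampoline itself. A condensed sketch of that teardown ordering with a hypothetical demo_trampoline object (only the two synchronize calls and kfree() are real APIs):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical trampoline wrapper, for illustration only. */
struct demo_trampoline {
	void *code;
};

static void demo_free_trampoline(struct demo_trampoline *tramp)
{
	/* Wait for callers running with preemption disabled, RCU not watching. */
	synchronize_rcu_tasks_rude();

	/* Wait for tasks preempted while executing on the trampoline. */
	synchronize_rcu_tasks();

	kfree(tramp->code);
	kfree(tramp);
}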

@@ -5160,17 +5149,16 @@
 	if (!WARN_ON(!direct)) {
 		/* This is the good path (see the ! before WARN) */
 		direct->count--;
 		WARN_ON(direct->count < 0);
 		if (!direct->count) {
 			list_del_rcu(&direct->next);
 			synchronize_rcu_tasks();
 			kfree(direct);
-			kfree(entry);
 			ftrace_direct_func_count--;
 		}
 	}
  out_unlock:
 	mutex_unlock(&direct_mutex);
 
 	return ret;
 }
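The unregister path above follows the standard remove/wait/free discipline for RCU-protected data: drop the reference count, and once it hits zero unlink the entry with list_del_rcu(), wait out a tasks-RCU grace period so no tracer can still be walking it, then kfree() it. A stripped-down sketch with a hypothetical demo_node type (list_del_rcu(), synchronize_rcu_tasks() and kfree() are the real calls):

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head next;
	int count;
};

static void demo_put_node(struct demo_node *node)
{
	if (--node->count)
		return;

	/* Unlink so new lockless readers can no longer find the node... */
	list_del_rcu(&node->next);
	/* ...wait for readers that may still be using it... */
	synchronize_rcu_tasks();
	/* ...and only then free it. */
	kfree(node);
}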

@@ -5883,17 +5871,17 @@
 	/*
 	 * We need to do a hard force of sched synchronization.
 	 * This is because we use preempt_disable() to do RCU, but
 	 * the function tracers can be called where RCU is not watching
 	 * (like before user_exit()). We can not rely on the RCU
 	 * infrastructure to do the synchronization, thus we must do it
 	 * ourselves.
 	 */
-	schedule_on_each_cpu(ftrace_sync);
+	synchronize_rcu_tasks_rude();
 
 		free_ftrace_hash(old_hash);
 	}
 
  out:
 	free_ftrace_hash(fgd->new_hash);
 	kfree(fgd);
 
