ftrace.c: comparison of revisions 186a73dc9a81b087d0708f21a990615957ec9c1c and bdffd893a0e9c431304142d12d9a0a21d365c502
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>

--- 48 unchanged lines hidden ---

57 })
58
59/* hash bits for specific function selection */
60#define FTRACE_HASH_BITS 7
61#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62#define FTRACE_HASH_DEFAULT_BITS 10
63#define FTRACE_HASH_MAX_BITS 12
64
65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66
67#ifdef CONFIG_DYNAMIC_FTRACE
68#define INIT_REGEX_LOCK(opsname) \
69 .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
70#else
71#define INIT_REGEX_LOCK(opsname)
72#endif
73

--- 24 unchanged lines hidden ---

98/*
99 * ftrace_disabled is set when an anomaly is discovered.
100 * ftrace_disabled is much stronger than ftrace_enabled.
101 */
102static int ftrace_disabled __read_mostly;
103
104static DEFINE_MUTEX(ftrace_lock);
105
106static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
107static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
108static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
109ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
110ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
111static struct ftrace_ops global_ops;
112static struct ftrace_ops control_ops;
113
114#if ARCH_SUPPORTS_FTRACE_OPS

--- 51 unchanged lines hidden ---

166 ops != &ftrace_list_end; ops = ops->next)
167 cnt++;
168
169 mutex_unlock(&ftrace_lock);
170
171 return cnt;
172}
173
174static void
175ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
176 struct ftrace_ops *op, struct pt_regs *regs)
177{
178 int bit;
179
180 bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
181 if (bit < 0)
182 return;
183
184 do_for_each_ftrace_op(op, ftrace_global_list) {
185 op->func(ip, parent_ip, op, regs);
186 } while_for_each_ftrace_op(op);
187
188 trace_clear_recursion(bit);
189}
190
191static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
192 struct ftrace_ops *op, struct pt_regs *regs)
193{
194 if (!test_tsk_trace_trace(current))
195 return;
196
197 ftrace_pid_function(ip, parent_ip, op, regs);
198}

--- 33 unchanged lines hidden ---

232 if (!disabled)
233 return -ENOMEM;
234
235 ops->disabled = disabled;
236 control_ops_disable_all(ops);
237 return 0;
238}
239
240static void update_global_ops(void)
241{
242 ftrace_func_t func = ftrace_global_list_func;
243 void *private = NULL;
244
245 /* The list has its own recursion protection. */
246 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
247
248 /*
249 * If there's only one function registered, then call that
250 * function directly. Otherwise, we need to iterate over the
251 * registered callers.
252 */
253 if (ftrace_global_list == &ftrace_list_end ||
254 ftrace_global_list->next == &ftrace_list_end) {
255 func = ftrace_global_list->func;
256 private = ftrace_global_list->private;
257 /*
258 * As we are calling the function directly.
259 * If it does not have recursion protection,
260 * the function_trace_op needs to be updated
261 * accordingly.
262 */
263 if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
264 global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
265 }
266
267 /* If we filter on pids, update to use the pid function */
268 if (!list_empty(&ftrace_pids)) {
269 set_ftrace_pid_function(func);
270 func = ftrace_pid_func;
271 }
272
273 global_ops.func = func;
274 global_ops.private = private;
275}
276
277static void ftrace_sync(struct work_struct *work)
278{
279 /*
280 * This function is just a stub to implement a hard force
281 * of synchronize_sched(). This requires synchronizing
282 * tasks even in userspace and idle.
283 *
284 * Yes, function tracing is rude.

--- 11 unchanged lines hidden ---

296#else
297static inline void update_function_graph_func(void) { }
298#endif
299
300static void update_ftrace_function(void)
301{
302 ftrace_func_t func;
303
304 update_global_ops();
305
306 /*
307 * If we are at the end of the list and this ops is
308 * recursion safe and not dynamic and the arch supports passing ops,
309 * then have the mcount trampoline call the function directly.
310 */
311 if (ftrace_ops_list == &ftrace_list_end ||
312 (ftrace_ops_list->next == &ftrace_list_end &&
313 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
314 (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
315 !FTRACE_FORCE_LIST_FUNC)) {
316 /* Set the ftrace_ops that the arch callback uses */
317 if (ftrace_ops_list == &global_ops)
318 set_function_trace_op = ftrace_global_list;
319 else
320 set_function_trace_op = ftrace_ops_list;
260 set_function_trace_op = ftrace_ops_list;
321 func = ftrace_ops_list->func;
322 } else {
323 /* Just use the default ftrace_ops */
324 set_function_trace_op = &ftrace_list_end;
325 func = ftrace_ops_list_func;
326 }
327
328 /* If there's no change, then do nothing more here */

--- 39 unchanged lines hidden ---

368 /* Nasty way to force a rmb on all cpus */
369 smp_call_function(ftrace_sync_ipi, NULL, 1);
370 /* OK, we are all set to update the ftrace_trace_function now! */
371#endif /* !CONFIG_DYNAMIC_FTRACE */
372
373 ftrace_trace_function = func;
374}
375
316int using_ftrace_ops_list_func(void)
317{
318 return ftrace_trace_function == ftrace_ops_list_func;
319}
320
376static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
377{
378 ops->next = *list;
379 /*
380 * We are entering ops into the list but another
381 * CPU might be walking that list. We need to make sure
382 * the ops->next pointer is valid before another CPU sees
383 * the ops pointer included into the list.

--- 45 unchanged lines hidden ---

429 return ret;
430}
431
432static int __register_ftrace_function(struct ftrace_ops *ops)
433{
434 if (ops->flags & FTRACE_OPS_FL_DELETED)
435 return -EINVAL;
436
437 if (FTRACE_WARN_ON(ops == &global_ops))
438 return -EINVAL;
439
440 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
441 return -EBUSY;
442
443 /* We don't support both control and global flags set. */
444 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
445 return -EINVAL;
446
447#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
448 /*
449 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
450 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
451 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
452 */
453 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
454 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
455 return -EINVAL;
456
457 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
458 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
459#endif
460
461 if (!core_kernel_data((unsigned long)ops))
462 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
463
464 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
465 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
466 ops->flags |= FTRACE_OPS_FL_ENABLED;
467 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
402 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
468 if (control_ops_alloc(ops))
469 return -ENOMEM;
470 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
471 } else
472 add_ftrace_ops(&ftrace_ops_list, ops);
473
474 if (ftrace_enabled)
475 update_ftrace_function();
476
477 return 0;
478}
479
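Note: __register_ftrace_function() above backs the public register_ftrace_function()/unregister_ftrace_function() interface. A minimal sketch of a caller, not part of ftrace.c (the ops and callback names are hypothetical); FTRACE_OPS_FL_RECURSION_SAFE is set because the callback does not itself recurse into tracing:

#include <linux/ftrace.h>

/* Hypothetical callback, invoked at the entry of every traced function. */
static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs)
{
        /* Keep this path short; it runs in every traced function. */
}

static struct ftrace_ops my_ops = {
        .func   = my_trace_callback,
        .flags  = FTRACE_OPS_FL_RECURSION_SAFE,
};

/* register_ftrace_function(&my_ops) adds the ops to ftrace_ops_list through the
 * code above; unregister_ftrace_function(&my_ops) removes it again. */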
480static int __unregister_ftrace_function(struct ftrace_ops *ops)
481{
482 int ret;
483
484 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
485 return -EBUSY;
486
487 if (FTRACE_WARN_ON(ops == &global_ops))
488 return -EINVAL;
489
490 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
491 ret = remove_ftrace_list_ops(&ftrace_global_list,
492 &global_ops, ops);
493 if (!ret)
494 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
495 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
422 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
496 ret = remove_ftrace_list_ops(&ftrace_control_list,
497 &control_ops, ops);
498 } else
499 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
500
501 if (ret < 0)
502 return ret;
503

--- 386 unchanged lines hidden ---

890 struct ftrace_profile *rec;
891 unsigned long flags;
892
893 if (!ftrace_profile_enabled)
894 return;
895
896 local_irq_save(flags);
897
898 stat = &__get_cpu_var(ftrace_profile_stats);
825 stat = this_cpu_ptr(&ftrace_profile_stats);
899 if (!stat->hash || !ftrace_profile_enabled)
900 goto out;
901
902 rec = ftrace_find_profiled_func(stat, ip);
903 if (!rec) {
904 rec = ftrace_profile_alloc(stat, ip);
905 if (!rec)
906 goto out;

--- 14 unchanged lines hidden ---

921static void profile_graph_return(struct ftrace_graph_ret *trace)
922{
923 struct ftrace_profile_stat *stat;
924 unsigned long long calltime;
925 struct ftrace_profile *rec;
926 unsigned long flags;
927
928 local_irq_save(flags);
929 stat = &__get_cpu_var(ftrace_profile_stats);
856 stat = this_cpu_ptr(&ftrace_profile_stats);
930 if (!stat->hash || !ftrace_profile_enabled)
931 goto out;
932
933 /* If the calltime was zero'd ignore it */
934 if (!trace->calltime)
935 goto out;
936
937 calltime = trace->rettime - trace->calltime;

--- 1185 unchanged lines hidden ---
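Note: the two profiling hunks above replace the old __get_cpu_var() lvalue with this_cpu_ptr() when fetching the per-CPU ftrace_profile_stats. A stand-alone sketch of the same idiom, not part of ftrace.c (struct and variable names are made up); as in the profiling code above, the caller is expected to have interrupts or preemption disabled:

#include <linux/percpu.h>

struct my_stats {
        unsigned long hits;
};

static DEFINE_PER_CPU(struct my_stats, my_counters);

static void bump_this_cpu(void)
{
        /* Preferred form: take a pointer to this CPU's instance ... */
        struct my_stats *stat = this_cpu_ptr(&my_counters);

        /* ... rather than the older &__get_cpu_var(my_counters). */
        stat->hits++;
}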

2123
2124 ret = __register_ftrace_function(ops);
2125 if (ret)
2126 return ret;
2127
2128 ftrace_start_up++;
2129 command |= FTRACE_UPDATE_CALLS;
2130
2131 /* ops marked global share the filter hashes */
2132 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2133 ops = &global_ops;
2134 /* Don't update hash if global is already set */
2135 if (global_start_up)
2136 hash_enable = false;
2137 global_start_up++;
2138 }
2139
2140 ops->flags |= FTRACE_OPS_FL_ENABLED;
2141 if (hash_enable)
2142 ftrace_hash_rec_enable(ops, 1);
2143
2144 ftrace_startup_enable(command);
2145
2146 return 0;
2147}

--- 13 unchanged lines hidden ---

2161 ftrace_start_up--;
2162 /*
2163 * Just warn in case of unbalance, no need to kill ftrace, it's not
2164 * critical but the ftrace_call callers may be never nopped again after
2165 * further ftrace uses.
2166 */
2167 WARN_ON_ONCE(ftrace_start_up < 0);
2168
2169 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2170 ops = &global_ops;
2171 global_start_up--;
2172 WARN_ON_ONCE(global_start_up < 0);
2173 /* Don't update hash if global still has users */
2174 if (global_start_up) {
2175 WARN_ON_ONCE(!ftrace_start_up);
2176 hash_disable = false;
2177 }
2178 }
2179
2180 if (hash_disable)
2181 ftrace_hash_rec_disable(ops, 1);
2182
2183 if (ops != &global_ops || !global_start_up)
2090 if (!global_start_up)
2184 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2185
2186 command |= FTRACE_UPDATE_CALLS;
2187
2188 if (saved_ftrace_func != ftrace_trace_function) {
2189 saved_ftrace_func = ftrace_trace_function;
2190 command |= FTRACE_UPDATE_TRACE_FUNC;
2191 }

--- 1327 unchanged lines hidden ---

3519static int
3520ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3521 unsigned long ip, int remove, int reset, int enable)
3522{
3523 struct ftrace_hash **orig_hash;
3524 struct ftrace_hash *hash;
3525 int ret;
3526
3527 /* All global ops uses the global ops filters */
3528 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3529 ops = &global_ops;
3530
3531 if (unlikely(ftrace_disabled))
3532 return -ENODEV;
3533
3534 mutex_lock(&ops->regex_lock);
3535
3536 if (enable)
3537 orig_hash = &ops->filter_hash;
3538 else

--- 95 unchanged lines hidden ---

3634int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3635 int len, int reset)
3636{
3637 ftrace_ops_init(ops);
3638 return ftrace_set_regex(ops, buf, len, reset, 0);
3639}
3640EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3641/**
3642 * ftrace_set_filter - set a function to filter on in ftrace
3643 * @ops - the ops to set the filter with
3545 * ftrace_set_global_filter - set a function to filter on with global tracers
3644 * @buf - the string that holds the function filter text.
3645 * @len - the length of the string.
3646 * @reset - non zero to reset all filters before applying this filter.
3647 *
3648 * Filters denote which functions should be enabled when tracing is enabled.
3649 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3650 */
3651void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3652{
3653 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3654}
3655EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3656
3657/**
3658 * ftrace_set_notrace - set a function to not trace in ftrace
3659 * @ops - the ops to set the notrace filter with
3560 * ftrace_set_global_notrace - set a function to not trace with global tracers
3660 * @buf - the string that holds the function notrace text.
3661 * @len - the length of the string.
3662 * @reset - non zero to reset all filters before applying this filter.
3663 *
3664 * Notrace Filters denote which functions should not be enabled when tracing
3665 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3666 * for tracing.
3667 */

--- 789 unchanged lines hidden ---
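Note: the kerneldoc above covers the global filter/notrace setters; ftrace_set_filter() and ftrace_set_notrace() are the per-ops variants with the same @buf/@len/@reset arguments plus an ops pointer. A hedged usage sketch, not part of ftrace.c (the ops name and patterns are illustrative only):

#include <linux/ftrace.h>
#include <linux/string.h>

static struct ftrace_ops my_filtered_ops;       /* hypothetical, .func set elsewhere */

static void my_install_filters(void)
{
        /* Trace only functions matching "kmalloc*" for this ops;
         * reset = 1 drops any previously installed filter first. */
        ftrace_set_filter(&my_filtered_ops, "kmalloc*", strlen("kmalloc*"), 1);

        /* Never trace schedule() through this ops (reset = 0 appends). */
        ftrace_set_notrace(&my_filtered_ops, "schedule", strlen("schedule"), 0);
}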

4457static inline int
4458ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4459{
4460 return 1;
4461}
4462
4463#endif /* CONFIG_DYNAMIC_FTRACE */
4464
4366__init void ftrace_init_global_array_ops(struct trace_array *tr)
4367{
4368 tr->ops = &global_ops;
4369 tr->ops->private = tr;
4370}
4371
4372void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
4373{
4374 /* If we filter on pids, update to use the pid function */
4375 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4376 if (WARN_ON(tr->ops->func != ftrace_stub))
4377 printk("ftrace ops had %pS for function\n",
4378 tr->ops->func);
4379 /* Only the top level instance does pid tracing */
4380 if (!list_empty(&ftrace_pids)) {
4381 set_ftrace_pid_function(func);
4382 func = ftrace_pid_func;
4383 }
4384 }
4385 tr->ops->func = func;
4386 tr->ops->private = tr;
4387}
4388
4389void ftrace_reset_array_ops(struct trace_array *tr)
4390{
4391 tr->ops->func = ftrace_stub;
4392}
4393
4465static void
4466ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4467 struct ftrace_ops *op, struct pt_regs *regs)
4468{
4469 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4470 return;
4471
4472 /*

--- 42 unchanged lines hidden ---

4515 return;
4516
4517 /*
4518 * Some of the ops may be dynamically allocated,
4519 * they must be freed after a synchronize_sched().
4520 */
4521 preempt_disable_notrace();
4522 do_for_each_ftrace_op(op, ftrace_ops_list) {
4523 if (ftrace_ops_test(op, ip, regs))
4452 if (ftrace_ops_test(op, ip, regs)) {
4453 if (WARN_ON(!op->func)) {
4454 function_trace_stop = 1;
4455 printk("op=%p %pS\n", op, op);
4456 goto out;
4457 }
4524 op->func(ip, parent_ip, op, regs);
4458 op->func(ip, parent_ip, op, regs);
4459 }
4525 } while_for_each_ftrace_op(op);
4460 } while_for_each_ftrace_op(op);
4461out:
4526 preempt_enable_notrace();
4527 trace_clear_recursion(bit);
4528}
4529
4530/*
4531 * Some archs only support passing ip and parent_ip. Even though
4532 * the list function ignores the op parameter, we do not want any
4533 * C side effects, where a function is called without the caller

--- 388 unchanged lines hidden ---

4922 out:
4923 mutex_unlock(&ftrace_lock);
4924 return ret;
4925}
4926
4927#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4928
4929static int ftrace_graph_active;
4930static struct notifier_block ftrace_suspend_notifier;
4931
4932int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4933{
4934 return 0;
4935}
4936
4937/* The callbacks that hook a function */
4938trace_func_graph_ret_t ftrace_graph_return =

--- 129 unchanged lines hidden ---

5068
5069 case PM_POST_HIBERNATION:
5070 unpause_graph_tracing();
5071 break;
5072 }
5073 return NOTIFY_DONE;
5074}
5075
5076/* Just a place holder for function graph */
5077static struct ftrace_ops fgraph_ops __read_mostly = {
5078 .func = ftrace_stub,
5079 .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
5080 FTRACE_OPS_FL_RECURSION_SAFE,
5081};
5082
5083static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5084{
5085 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5086 return 0;
5087 return __ftrace_graph_entry(trace);
5088}
5089
5090/*

--- 8 unchanged lines hidden ---

5099 if (ftrace_ops_list == &ftrace_list_end ||
5100 (ftrace_ops_list == &global_ops &&
5101 global_ops.next == &ftrace_list_end))
5102 ftrace_graph_entry = __ftrace_graph_entry;
5103 else
5104 ftrace_graph_entry = ftrace_graph_entry_test;
5105}
5106
5035static struct notifier_block ftrace_suspend_notifier = {
5036 .notifier_call = ftrace_suspend_notifier_call,
5037};
5038
5107int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5108 trace_func_graph_ent_t entryfunc)
5109{
5110 int ret = 0;
5111
5112 mutex_lock(&ftrace_lock);
5113
5114 /* we currently allow only one tracer registered at a time */
5115 if (ftrace_graph_active) {
5116 ret = -EBUSY;
5117 goto out;
5118 }
5119
5120 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
5121 register_pm_notifier(&ftrace_suspend_notifier);
5122
5123 ftrace_graph_active++;
5124 ret = start_graph_tracing();
5125 if (ret) {
5126 ftrace_graph_active--;
5127 goto out;
5128 }

--- 5 unchanged lines hidden ---

5134 * function that gets called to the entry_test first. Then
5135 * call the update fgraph entry function to determine if
5136 * the entryfunc should be called directly or not.
5137 */
5138 __ftrace_graph_entry = entryfunc;
5139 ftrace_graph_entry = ftrace_graph_entry_test;
5140 update_function_graph_func();
5141
5142 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
5073 /* Function graph doesn't use the .func field of global_ops */
5074 global_ops.flags |= FTRACE_OPS_FL_STUB;
5143
5075
5076 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
5077
5144out:
5145 mutex_unlock(&ftrace_lock);
5146 return ret;
5147}
5148
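Note: register_ftrace_graph() above takes a return handler and an entry handler. A minimal sketch of such a pair, not part of ftrace.c (handler names are hypothetical); the prototypes match ftrace_graph_entry_stub() and profile_graph_return() earlier in this file:

#include <linux/ftrace.h>

/* Hypothetical entry handler: return non-zero to trace this call. */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
        return 1;
}

/* Hypothetical return handler: runs as the traced function returns. */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
        /* trace->rettime - trace->calltime gives the duration, as the
         * profiling callback earlier in this file computes. */
}

/* register_ftrace_graph(my_graph_return, my_graph_entry) installs the pair;
 * unregister_ftrace_graph() (below) tears everything back down. */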
5149void unregister_ftrace_graph(void)
5150{
5151 mutex_lock(&ftrace_lock);
5152
5153 if (unlikely(!ftrace_graph_active))
5154 goto out;
5155
5156 ftrace_graph_active--;
5157 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5158 ftrace_graph_entry = ftrace_graph_entry_stub;
5159 __ftrace_graph_entry = ftrace_graph_entry_stub;
5160 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
5094 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
5095 global_ops.flags &= ~FTRACE_OPS_FL_STUB;
5161 unregister_pm_notifier(&ftrace_suspend_notifier);
5162 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5163
5164 out:
5165 mutex_unlock(&ftrace_lock);
5166}
5167
5168static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

--- 77 unchanged lines hidden ---