1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Ftrace header. For implementation details beyond the random comments
4 * scattered below, see: Documentation/trace/ftrace-design.rst
5 */
6
7 #ifndef _LINUX_FTRACE_H
8 #define _LINUX_FTRACE_H
9
10 #include <linux/trace_recursion.h>
11 #include <linux/trace_clock.h>
12 #include <linux/jump_label.h>
13 #include <linux/kallsyms.h>
14 #include <linux/linkage.h>
15 #include <linux/bitops.h>
16 #include <linux/ptrace.h>
17 #include <linux/ktime.h>
18 #include <linux/sched.h>
19 #include <linux/types.h>
20 #include <linux/init.h>
21 #include <linux/fs.h>
22
23 #include <asm/ftrace.h>
24
25 /*
26 * If the arch supports passing the variable contents of
27 * function_trace_op as the third parameter back from the
28 * mcount call, then the arch should define this as 1.
29 */
30 #ifndef ARCH_SUPPORTS_FTRACE_OPS
31 #define ARCH_SUPPORTS_FTRACE_OPS 0
32 #endif
33
34 #ifdef CONFIG_TRACING
35 extern void ftrace_boot_snapshot(void);
36 #else
37 static inline void ftrace_boot_snapshot(void) { }
38 #endif
39
40 struct ftrace_ops;
41 struct ftrace_regs;
42 struct dyn_ftrace;
43
44 char *arch_ftrace_match_adjust(char *str, const char *search);
45
46 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
47 struct fgraph_ret_regs;
48 unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
49 #else
50 unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
51 #endif
52
53 #ifdef CONFIG_FUNCTION_TRACER
54 /*
55 * If the arch's mcount caller does not support all of ftrace's
56 * features, then it must call an indirect function that
57 * does. Or at least does enough to prevent any unwelcome side effects.
58 *
59 * Also define the function prototype that these architectures use
60 * to call the ftrace_ops_list_func().
61 */
62 #if !ARCH_SUPPORTS_FTRACE_OPS
63 # define FTRACE_FORCE_LIST_FUNC 1
64 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
65 #else
66 # define FTRACE_FORCE_LIST_FUNC 0
67 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
68 struct ftrace_ops *op, struct ftrace_regs *fregs);
69 #endif
70 extern const struct ftrace_ops ftrace_nop_ops;
71 extern const struct ftrace_ops ftrace_list_ops;
72 struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
73 #endif /* CONFIG_FUNCTION_TRACER */
74
75 /* Main tracing buffer and events set up */
76 #ifdef CONFIG_TRACING
77 void trace_init(void);
78 void early_trace_init(void);
79 #else
80 static inline void trace_init(void) { }
81 static inline void early_trace_init(void) { }
82 #endif
83
84 struct module;
85 struct ftrace_hash;
86
87 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
88 defined(CONFIG_DYNAMIC_FTRACE)
89 int
90 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
91 unsigned long *off, char **modname, char *sym);
92 #else
93 static inline int
94 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
95 unsigned long *off, char **modname, char *sym)
96 {
97 return 0;
98 }
99 #endif
100
101 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
102 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
103 char *type, char *name,
104 char *module_name, int *exported);
105 #else
106 static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
107 char *type, char *name,
108 char *module_name, int *exported)
109 {
110 return -1;
111 }
112 #endif
113
114 #ifdef CONFIG_FUNCTION_TRACER
115
116 extern int ftrace_enabled;
117
118 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
119
120 struct ftrace_regs {
121 struct pt_regs regs;
122 };
123 #define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
124
125 /*
126 * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
127 * if it allows setting the instruction pointer from the ftrace_regs when
128 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
129 */
130 #define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
131 #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
132
133 static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
134 {
135 if (!fregs)
136 return NULL;
137
138 return arch_ftrace_get_regs(fregs);
139 }
140
141 /*
142 * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
143 * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
144 */
145 static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
146 {
147 if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
148 return true;
149
150 return ftrace_get_regs(fregs) != NULL;
151 }
152
153 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
154 #define ftrace_regs_get_instruction_pointer(fregs) \
155 instruction_pointer(ftrace_get_regs(fregs))
156 #define ftrace_regs_get_argument(fregs, n) \
157 regs_get_kernel_argument(ftrace_get_regs(fregs), n)
158 #define ftrace_regs_get_stack_pointer(fregs) \
159 kernel_stack_pointer(ftrace_get_regs(fregs))
160 #define ftrace_regs_return_value(fregs) \
161 regs_return_value(ftrace_get_regs(fregs))
162 #define ftrace_regs_set_return_value(fregs, ret) \
163 regs_set_return_value(ftrace_get_regs(fregs), ret)
164 #define ftrace_override_function_with_return(fregs) \
165 override_function_with_return(ftrace_get_regs(fregs))
166 #define ftrace_regs_query_register_offset(name) \
167 regs_query_register_offset(name)
168 #endif
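/*
 * Illustrative sketch only (the callback name below is hypothetical, not part
 * of this header): a registered callback would normally go through the
 * accessors above instead of poking at pt_regs directly.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		if (!ftrace_regs_has_args(fregs))
 *			return;
 *		pr_debug("hit %ps, first arg: %lx\n", (void *)ip,
 *			 ftrace_regs_get_argument(fregs, 0));
 *	}
 */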
169
170 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
171 struct ftrace_ops *op, struct ftrace_regs *fregs);
172
173 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
174
175 /*
176 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
177 * set in the flags member.
178 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
179 * IPMODIFY are a kind of attribute flags which can be set only before
180 * registering the ftrace_ops, and can not be modified while registered.
181 * Changing those attribute flags after registering ftrace_ops will
182 * cause unexpected results.
183 *
184 * ENABLED - set/unset when ftrace_ops is registered/unregistered
185 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
186 * allocated ftrace_ops which need special care
187 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
188 * and passed to the callback. If this flag is set, but the
189 * architecture does not support passing regs
190 * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
191 * ftrace_ops will fail to register, unless the next flag
192 * is set.
193 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
194 * handler can handle an arch that does not save regs
195 * (the handler tests if regs == NULL), then it can set
196 * this flag instead. It will not fail registering the ftrace_ops
197 * but, the regs field will be NULL if the arch does not support
198 * passing regs to the handler.
199 * Note, if this flag is set, the SAVE_REGS flag will automatically
200 * get set upon registering the ftrace_ops, if the arch supports it.
201 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
202 * that the call back needs recursion protection. If it does
203 * not set this, then the ftrace infrastructure will assume
204 * that the callback can handle recursion on its own.
205 * STUB - The ftrace_ops is just a placeholder.
206 * INITIALIZED - The ftrace_ops has already been initialized (the first time
207 * register_ftrace_function() is called, it will initialize the ops)
208 * DELETED - The ops are being deleted, do not let them be registered again.
209 * ADDING - The ops is in the process of being added.
210 * REMOVING - The ops is in the process of being removed.
211 * MODIFYING - The ops is in the process of changing its filter functions.
212 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
213 * The arch specific code sets this flag when it allocated a
214 * trampoline. This lets the arch know that it can update the
215 * trampoline in case the callback function changes.
216 * The ftrace_ops trampoline can be set by the ftrace users, and
217 * in such cases the arch must not modify it. Only the arch ftrace
218 * core code should set this flag.
219 * IPMODIFY - The ops can modify the IP register. This can only be set with
220 * SAVE_REGS. If another ops with this flag set is already registered
221 * for any of the functions that this ops will be registered for, then
222 * this ops will fail to register or set_filter_ip.
223 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
224 * RCU - Set when the ops can only be called when RCU is watching.
225 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
226 * PERMANENT - Set when the ops is permanent and should not be affected by
227 * ftrace_enabled.
228 * DIRECT - Used by the direct ftrace_ops helper for direct functions
229 * (internal ftrace only, should not be used by others)
230 * SUBOP - Is controlled by another op in field managed.
231 */
232 enum {
233 FTRACE_OPS_FL_ENABLED = BIT(0),
234 FTRACE_OPS_FL_DYNAMIC = BIT(1),
235 FTRACE_OPS_FL_SAVE_REGS = BIT(2),
236 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
237 FTRACE_OPS_FL_RECURSION = BIT(4),
238 FTRACE_OPS_FL_STUB = BIT(5),
239 FTRACE_OPS_FL_INITIALIZED = BIT(6),
240 FTRACE_OPS_FL_DELETED = BIT(7),
241 FTRACE_OPS_FL_ADDING = BIT(8),
242 FTRACE_OPS_FL_REMOVING = BIT(9),
243 FTRACE_OPS_FL_MODIFYING = BIT(10),
244 FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11),
245 FTRACE_OPS_FL_IPMODIFY = BIT(12),
246 FTRACE_OPS_FL_PID = BIT(13),
247 FTRACE_OPS_FL_RCU = BIT(14),
248 FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
249 FTRACE_OPS_FL_PERMANENT = BIT(16),
250 FTRACE_OPS_FL_DIRECT = BIT(17),
251 FTRACE_OPS_FL_SUBOP = BIT(18),
252 };
253
254 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
255 #define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS
256 #else
257 #define FTRACE_OPS_FL_SAVE_ARGS 0
258 #endif
259
260 /*
261 * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
262 * to a ftrace_ops. Note, the requests may fail.
263 *
264 * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
265 * function as an ops with IPMODIFY. Called
266 * when the DIRECT ops is being registered.
267 * This is called with both direct_mutex and
268 * ftrace_lock held.
269 *
270 * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
271 * function as an ops with IPMODIFY. Called
272 * when the other ops (the one with IPMODIFY)
273 * is being registered.
274 * This is called with direct_mutex locked.
275 *
276 * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops from working on the same
277 * function as an ops with IPMODIFY. Called
278 * when the other ops (the one with IPMODIFY)
279 * is being unregistered.
280 * This is called with direct_mutex locked.
281 */
282 enum ftrace_ops_cmd {
283 FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
284 FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
285 FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
286 };
287
288 /*
289 * For most ftrace_ops_cmd,
290 * Returns:
291 * 0 - Success.
292 * Negative on failure. The return value is dependent on the
293 * callback.
294 */
295 typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
296
297 #ifdef CONFIG_DYNAMIC_FTRACE
298 /* The hash used to know what functions callbacks trace */
299 struct ftrace_ops_hash {
300 struct ftrace_hash __rcu *notrace_hash;
301 struct ftrace_hash __rcu *filter_hash;
302 struct mutex regex_lock;
303 };
304
305 void ftrace_free_init_mem(void);
306 void ftrace_free_mem(struct module *mod, void *start, void *end);
307 #else
308 static inline void ftrace_free_init_mem(void)
309 {
310 ftrace_boot_snapshot();
311 }
312 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
313 #endif
314
315 /*
316 * Note, ftrace_ops can be referenced outside of RCU protection, unless
317 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
318 * core data, the unregistering of it will perform a scheduling on all CPUs
319 * to make sure that there are no more users. Depending on the load of the
320 * system that may take a bit of time.
321 *
322 * Any private data added must also take care not to be freed and if private
323 * data is added to a ftrace_ops that is in core code, the user of the
324 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
325 */
326 struct ftrace_ops {
327 ftrace_func_t func;
328 struct ftrace_ops __rcu *next;
329 unsigned long flags;
330 void *private;
331 ftrace_func_t saved_func;
332 #ifdef CONFIG_DYNAMIC_FTRACE
333 struct ftrace_ops_hash local_hash;
334 struct ftrace_ops_hash *func_hash;
335 struct ftrace_ops_hash old_hash;
336 unsigned long trampoline;
337 unsigned long trampoline_size;
338 struct list_head list;
339 struct list_head subop_list;
340 ftrace_ops_func_t ops_func;
341 struct ftrace_ops *managed;
342 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
343 unsigned long direct_call;
344 #endif
345 #endif
346 };
347
348 extern struct ftrace_ops __rcu *ftrace_ops_list;
349 extern struct ftrace_ops ftrace_list_end;
350
351 /*
352 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
353 * can use rcu_dereference_raw_check() is that elements removed from this list
354 * are simply leaked, so there is no need to interact with a grace-period
355 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
356 * concurrent insertions into the ftrace_ops_list.
357 *
358 * Silly Alpha and silly pointer-speculation compiler optimizations!
359 */
360 #define do_for_each_ftrace_op(op, list) \
361 op = rcu_dereference_raw_check(list); \
362 do
363
364 /*
365 * Optimized for just a single item in the list (as that is the normal case).
366 */
367 #define while_for_each_ftrace_op(op) \
368 while (likely(op = rcu_dereference_raw_check((op)->next)) && \
369 unlikely((op) != &ftrace_list_end))
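/*
 * Sketch of how the pair above is meant to be used (roughly what the core
 * list function does, minus its filtering and recursion checks):
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op != &ftrace_list_end)
 *			op->func(ip, parent_ip, op, fregs);
 *	} while_for_each_ftrace_op(op);
 */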
370
371 /*
372 * Type of the current tracing.
373 */
374 enum ftrace_tracing_type_t {
375 FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
376 FTRACE_TYPE_RETURN, /* Hook the return of the function */
377 };
378
379 /* Current tracing type, default is FTRACE_TYPE_ENTER */
380 extern enum ftrace_tracing_type_t ftrace_tracing_type;
381
382 /*
383 * The ftrace_ops must be static and should also
384 * be read_mostly. These functions do modify read_mostly variables
385 * so use them sparingly. Never free an ftrace_ops or modify the
386 * next pointer after it has been registered. Even after unregistering
387 * it, the next pointer may still be used internally.
388 */
389 int register_ftrace_function(struct ftrace_ops *ops);
390 int unregister_ftrace_function(struct ftrace_ops *ops);
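/*
 * Minimal registration sketch (names are hypothetical; my_callback is the
 * example callback sketched earlier in this file):
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * As noted above, my_ops must never be freed and its ->next pointer must
 * never be touched, even after unregistering.
 */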
391
392 extern void ftrace_stub(unsigned long a0, unsigned long a1,
393 struct ftrace_ops *op, struct ftrace_regs *fregs);
394
395
396 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
397 #else /* !CONFIG_FUNCTION_TRACER */
398 /*
399 * (un)register_ftrace_function must be a macro since the ops parameter
400 * must not be evaluated.
401 */
402 #define register_ftrace_function(ops) ({ 0; })
403 #define unregister_ftrace_function(ops) ({ 0; })
404 static inline void ftrace_kill(void) { }
405 static inline void ftrace_free_init_mem(void) { }
406 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
407 static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
408 {
409 return -EOPNOTSUPP;
410 }
411 #endif /* CONFIG_FUNCTION_TRACER */
412
413 struct ftrace_func_entry {
414 struct hlist_node hlist;
415 unsigned long ip;
416 unsigned long direct; /* for direct lookup only */
417 };
418
419 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
420 unsigned long ftrace_find_rec_direct(unsigned long ip);
421 int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
422 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
423 bool free_filters);
424 int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
425 int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
426
427 void ftrace_stub_direct_tramp(void);
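/*
 * Direct-call sketch (my_func and my_tramp are hypothetical; see
 * samples/ftrace/ for complete, arch-specific examples). The ops is
 * dedicated to the direct helper and its filter selects the call sites:
 *
 *	static struct ftrace_ops direct_ops;
 *
 *	ftrace_set_filter_ip(&direct_ops, (unsigned long)my_func, 0, 0);
 *	register_ftrace_direct(&direct_ops, (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct(&direct_ops, (unsigned long)my_tramp, true);
 */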
428
429 #else
430 struct ftrace_ops;
431 static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
432 {
433 return 0;
434 }
435 static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
436 {
437 return -ENODEV;
438 }
439 static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
440 bool free_filters)
441 {
442 return -ENODEV;
443 }
444 static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
445 {
446 return -ENODEV;
447 }
448 static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
449 {
450 return -ENODEV;
451 }
452
453 /*
454 * This must be implemented by the architecture.
455 * It is the way the ftrace direct_ops helper, when called
456 * via ftrace (because there are other callbacks besides the
457 * direct call), can inform the architecture's trampoline that this
458 * routine has a direct caller, and what the caller is.
459 *
460 * For example, in x86, it returns the direct caller
461 * callback function via the regs->orig_ax parameter.
462 * Then in the ftrace trampoline, if this is set, it makes
463 * the return from the trampoline jump to the direct caller
464 * instead of going back to the function it just traced.
465 */
466 static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
467 unsigned long addr) { }
468 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
469
470 #ifdef CONFIG_STACK_TRACER
471
472 extern int stack_tracer_enabled;
473
474 int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
475 size_t *lenp, loff_t *ppos);
476
477 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
478 DECLARE_PER_CPU(int, disable_stack_tracer);
479
480 /**
481 * stack_tracer_disable - temporarily disable the stack tracer
482 *
483 * There are a few locations (namely in RCU) where stack tracing
484 * cannot be executed. This function is used to disable stack
485 * tracing during those critical sections.
486 *
487 * This function must be called with preemption or interrupts
488 * disabled and stack_tracer_enable() must be called shortly after
489 * while preemption or interrupts are still disabled.
490 */
491 static inline void stack_tracer_disable(void)
492 {
493 /* Preemption or interrupts must be disabled */
494 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
495 WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
496 this_cpu_inc(disable_stack_tracer);
497 }
498
499 /**
500 * stack_tracer_enable - re-enable the stack tracer
501 *
502 * After stack_tracer_disable() is called, stack_tracer_enable()
503 * must be called shortly afterward.
504 */
505 static inline void stack_tracer_enable(void)
506 {
507 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
508 WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
509 this_cpu_dec(disable_stack_tracer);
510 }
511 #else
512 static inline void stack_tracer_disable(void) { }
513 static inline void stack_tracer_enable(void) { }
514 #endif
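/*
 * Intended pairing, sketched (the caller supplies the preemption/irq
 * disabling required by the comments above):
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	... code that must not be stack traced ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */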
515
516 enum {
517 FTRACE_UPDATE_CALLS = (1 << 0),
518 FTRACE_DISABLE_CALLS = (1 << 1),
519 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
520 FTRACE_START_FUNC_RET = (1 << 3),
521 FTRACE_STOP_FUNC_RET = (1 << 4),
522 FTRACE_MAY_SLEEP = (1 << 5),
523 };
524
525 #ifdef CONFIG_DYNAMIC_FTRACE
526
527 void ftrace_arch_code_modify_prepare(void);
528 void ftrace_arch_code_modify_post_process(void);
529
530 enum ftrace_bug_type {
531 FTRACE_BUG_UNKNOWN,
532 FTRACE_BUG_INIT,
533 FTRACE_BUG_NOP,
534 FTRACE_BUG_CALL,
535 FTRACE_BUG_UPDATE,
536 };
537 extern enum ftrace_bug_type ftrace_bug_type;
538
539 /*
540 * Archs can set this to point to a variable that holds the value that was
541 * expected at the call site before calling ftrace_bug().
542 */
543 extern const void *ftrace_expected;
544
545 void ftrace_bug(int err, struct dyn_ftrace *rec);
546
547 struct seq_file;
548
549 extern int ftrace_text_reserved(const void *start, const void *end);
550
551 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
552
553 bool is_ftrace_trampoline(unsigned long addr);
554
555 /*
556 * The dyn_ftrace record's flags field is split into two parts.
557 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
558 * the number of callbacks that have registered the function that
559 * the dyn_ftrace descriptor represents.
560 *
561 * The second part is a mask:
562 * ENABLED - the function is being traced
563 * REGS - the record wants the function to save regs
564 * REGS_EN - the function is set up to save regs.
565 * IPMODIFY - the record allows for the IP address to be changed.
566 * DISABLED - the record is not ready to be touched yet
567 * DIRECT - there is a direct function to call
568 * CALL_OPS - the record can use callsite-specific ops
569 * CALL_OPS_EN - the function is set up to use callsite-specific ops
570 * TOUCHED - A callback was added since boot up
571 * MODIFIED - The function had IPMODIFY or DIRECT attached to it
572 *
573 * When a new ftrace_ops is registered and wants a function to save
574 * pt_regs, the rec->flags REGS is set. When the function has been
575 * set up to save regs, the REGS_EN flag is set. Once a function
576 * starts saving regs it will do so until all ftrace_ops are removed
577 * from tracing that function.
578 */
579 enum {
580 FTRACE_FL_ENABLED = (1UL << 31),
581 FTRACE_FL_REGS = (1UL << 30),
582 FTRACE_FL_REGS_EN = (1UL << 29),
583 FTRACE_FL_TRAMP = (1UL << 28),
584 FTRACE_FL_TRAMP_EN = (1UL << 27),
585 FTRACE_FL_IPMODIFY = (1UL << 26),
586 FTRACE_FL_DISABLED = (1UL << 25),
587 FTRACE_FL_DIRECT = (1UL << 24),
588 FTRACE_FL_DIRECT_EN = (1UL << 23),
589 FTRACE_FL_CALL_OPS = (1UL << 22),
590 FTRACE_FL_CALL_OPS_EN = (1UL << 21),
591 FTRACE_FL_TOUCHED = (1UL << 20),
592 FTRACE_FL_MODIFIED = (1UL << 19),
593 };
594
595 #define FTRACE_REF_MAX_SHIFT 19
596 #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
597
598 #define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
599
600 struct dyn_ftrace {
601 unsigned long ip; /* address of mcount call-site */
602 unsigned long flags;
603 struct dyn_arch_ftrace arch;
604 };
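/*
 * Sketch of how the split flags field is read: the low FTRACE_REF_MAX_SHIFT
 * bits count registered callbacks, the bits above them are the state mask.
 *
 *	if (ftrace_rec_count(rec) && (rec->flags & FTRACE_FL_ENABLED))
 *		... rec->ip is currently patched to call a trampoline ...
 */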
605
606 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
607 int remove, int reset);
608 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
609 unsigned int cnt, int remove, int reset);
610 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
611 int len, int reset);
612 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
613 int len, int reset);
614 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
615 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
616 void ftrace_free_filter(struct ftrace_ops *ops);
617 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
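/*
 * Filter setup sketch (hypothetical ops and patterns; error handling
 * omitted). Filters are normally configured before registering the ops:
 *
 *	ftrace_set_filter(&my_ops, "kmalloc*", strlen("kmalloc*"), 1);
 *	ftrace_set_notrace(&my_ops, "kfree", strlen("kfree"), 0);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);
 */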
618
619 /*
620 * The FTRACE_UPDATE_* enum is used to pass information back
621 * from the ftrace_update_record() and ftrace_test_record()
622 * functions. These are called by the code update routines
623 * to find out what is to be done for a given function.
624 *
625 * IGNORE - The function is already what we want it to be
626 * MAKE_CALL - Start tracing the function
627 * MODIFY_CALL - Stop saving regs for the function
628 * MAKE_NOP - Stop tracing the function
629 */
630 enum {
631 FTRACE_UPDATE_IGNORE,
632 FTRACE_UPDATE_MAKE_CALL,
633 FTRACE_UPDATE_MODIFY_CALL,
634 FTRACE_UPDATE_MAKE_NOP,
635 };
636
637 enum {
638 FTRACE_ITER_FILTER = (1 << 0),
639 FTRACE_ITER_NOTRACE = (1 << 1),
640 FTRACE_ITER_PRINTALL = (1 << 2),
641 FTRACE_ITER_DO_PROBES = (1 << 3),
642 FTRACE_ITER_PROBE = (1 << 4),
643 FTRACE_ITER_MOD = (1 << 5),
644 FTRACE_ITER_ENABLED = (1 << 6),
645 FTRACE_ITER_TOUCHED = (1 << 7),
646 FTRACE_ITER_ADDRS = (1 << 8),
647 };
648
649 void arch_ftrace_update_code(int command);
650 void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
651 void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
652 void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
653
654 struct ftrace_rec_iter;
655
656 struct ftrace_rec_iter *ftrace_rec_iter_start(void);
657 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
658 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
659
660 #define for_ftrace_rec_iter(iter) \
661 for (iter = ftrace_rec_iter_start(); \
662 iter; \
663 iter = ftrace_rec_iter_next(iter))
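/*
 * Record-walk sketch, as used by arch code while text modification is
 * prepared (patch_call_site() is a made-up placeholder for the arch's own
 * helper):
 *
 *	struct ftrace_rec_iter *iter;
 *
 *	for_ftrace_rec_iter(iter) {
 *		struct dyn_ftrace *rec = ftrace_rec_iter_record(iter);
 *
 *		patch_call_site(rec->ip);
 *	}
 */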
664
665
666 int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
667 int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
668 void ftrace_run_stop_machine(int command);
669 unsigned long ftrace_location(unsigned long ip);
670 unsigned long ftrace_location_range(unsigned long start, unsigned long end);
671 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
672 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
673
674 extern ftrace_func_t ftrace_trace_function;
675
676 int ftrace_regex_open(struct ftrace_ops *ops, int flag,
677 struct inode *inode, struct file *file);
678 ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
679 size_t cnt, loff_t *ppos);
680 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
681 size_t cnt, loff_t *ppos);
682 int ftrace_regex_release(struct inode *inode, struct file *file);
683
684 void __init
685 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
686
687 /* defined in arch */
688 extern int ftrace_dyn_arch_init(void);
689 extern void ftrace_replace_code(int enable);
690 extern int ftrace_update_ftrace_func(ftrace_func_t func);
691 extern void ftrace_caller(void);
692 extern void ftrace_regs_caller(void);
693 extern void ftrace_call(void);
694 extern void ftrace_regs_call(void);
695 extern void mcount_call(void);
696
697 void ftrace_modify_all_code(int command);
698
699 #ifndef FTRACE_ADDR
700 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
701 #endif
702
703 #ifndef FTRACE_GRAPH_ADDR
704 #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
705 #endif
706
707 #ifndef FTRACE_REGS_ADDR
708 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
709 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
710 #else
711 # define FTRACE_REGS_ADDR FTRACE_ADDR
712 #endif
713 #endif
714
715 /*
716 * If an arch would like functions that are only traced
717 * by the function graph tracer to jump directly to its own
718 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
719 * to be that address to jump to.
720 */
721 #ifndef FTRACE_GRAPH_TRAMP_ADDR
722 #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
723 #endif
724
725 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
726 extern void ftrace_graph_caller(void);
727 extern int ftrace_enable_ftrace_graph_caller(void);
728 extern int ftrace_disable_ftrace_graph_caller(void);
729 #else
730 static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
731 static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
732 #endif
733
734 /**
735 * ftrace_make_nop - convert code into nop
736 * @mod: module structure if called by module load initialization
737 * @rec: the call site record (e.g. mcount/fentry)
738 * @addr: the address that the call site should be calling
739 *
740 * This is a very sensitive operation and great care needs
741 * to be taken by the arch. The operation should carefully
742 * read the location, check to see if what is read is indeed
743 * what we expect it to be, and then on success of the compare,
744 * it should write to the location.
745 *
746 * The code segment at @rec->ip should be a caller to @addr
747 *
748 * Return must be:
749 * 0 on success
750 * -EFAULT on error reading the location
751 * -EINVAL on a failed compare of the contents
752 * -EPERM on error writing to the location
753 * Any other value will be considered a failure.
754 */
755 extern int ftrace_make_nop(struct module *mod,
756 struct dyn_ftrace *rec, unsigned long addr);
757
758 /**
759 * ftrace_need_init_nop - return whether nop call sites should be initialized
760 *
761 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
762 * need to call ftrace_init_nop() if the code is built with that flag.
763 * Architectures where this is not always the case may define their own
764 * condition.
765 *
766 * Return must be:
767 * 0 if ftrace_init_nop() should be called
768 * Nonzero if ftrace_init_nop() should not be called
769 */
770
771 #ifndef ftrace_need_init_nop
772 #define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
773 #endif
774
775 /**
776 * ftrace_init_nop - initialize a nop call site
777 * @mod: module structure if called by module load initialization
778 * @rec: the call site record (e.g. mcount/fentry)
779 *
780 * This is a very sensitive operation and great care needs
781 * to be taken by the arch. The operation should carefully
782 * read the location, check to see if what is read is indeed
783 * what we expect it to be, and then on success of the compare,
784 * it should write to the location.
785 *
786 * The code segment at @rec->ip should contain the contents created by
787 * the compiler
788 *
789 * Return must be:
790 * 0 on success
791 * -EFAULT on error reading the location
792 * -EINVAL on a failed compare of the contents
793 * -EPERM on error writing to the location
794 * Any other value will be considered a failure.
795 */
796 #ifndef ftrace_init_nop
797 static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
798 {
799 return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
800 }
801 #endif
802
803 /**
804 * ftrace_make_call - convert a nop call site into a call to addr
805 * @rec: the call site record (e.g. mcount/fentry)
806 * @addr: the address that the call site should call
807 *
808 * This is a very sensitive operation and great care needs
809 * to be taken by the arch. The operation should carefully
810 * read the location, check to see if what is read is indeed
811 * what we expect it to be, and then on success of the compare,
812 * it should write to the location.
813 *
814 * The code segment at @rec->ip should be a nop
815 *
816 * Return must be:
817 * 0 on success
818 * -EFAULT on error reading the location
819 * -EINVAL on a failed compare of the contents
820 * -EPERM on error writing to the location
821 * Any other value will be considered a failure.
822 */
823 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
824
825 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
826 defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) || \
827 defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)
828 /**
829 * ftrace_modify_call - convert from one addr to another (no nop)
830 * @rec: the call site record (e.g. mcount/fentry)
831 * @old_addr: the address expected to be currently called to
832 * @addr: the address to change to
833 *
834 * This is a very sensitive operation and great care needs
835 * to be taken by the arch. The operation should carefully
836 * read the location, check to see if what is read is indeed
837 * what we expect it to be, and then on success of the compare,
838 * it should write to the location.
839 *
840 * When using call ops, this is called when the associated ops change, even
841 * when (addr == old_addr).
842 *
843 * The code segment at @rec->ip should be a caller to @old_addr
844 *
845 * Return must be:
846 * 0 on success
847 * -EFAULT on error reading the location
848 * -EINVAL on a failed compare of the contents
849 * -EPERM on error writing to the location
850 * Any other value will be considered a failure.
851 */
852 extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
853 unsigned long addr);
854 #else
855 /* Should never be called */
856 static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
857 unsigned long addr)
858 {
859 return -EINVAL;
860 }
861 #endif
862
863 extern int skip_trace(unsigned long ip);
864 extern void ftrace_module_init(struct module *mod);
865 extern void ftrace_module_enable(struct module *mod);
866 extern void ftrace_release_mod(struct module *mod);
867 #else /* CONFIG_DYNAMIC_FTRACE */
868 static inline int skip_trace(unsigned long ip) { return 0; }
869 static inline void ftrace_module_init(struct module *mod) { }
870 static inline void ftrace_module_enable(struct module *mod) { }
871 static inline void ftrace_release_mod(struct module *mod) { }
872 static inline int ftrace_text_reserved(const void *start, const void *end)
873 {
874 return 0;
875 }
876 static inline unsigned long ftrace_location(unsigned long ip)
877 {
878 return 0;
879 }
880
881 /*
882 * Again users of functions that have ftrace_ops may not
883 * have them defined when ftrace is not enabled, but these
884 * functions may still be called. Use a macro instead of inline.
885 */
886 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
887 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
888 #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
889 #define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
890 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
891 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
892 #define ftrace_free_filter(ops) do { } while (0)
893 #define ftrace_ops_set_global_filter(ops) do { } while (0)
894
895 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
896 size_t cnt, loff_t *ppos) { return -ENODEV; }
897 static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
898 size_t cnt, loff_t *ppos) { return -ENODEV; }
899 static inline int
900 ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
901
902 static inline bool is_ftrace_trampoline(unsigned long addr)
903 {
904 return false;
905 }
906 #endif /* CONFIG_DYNAMIC_FTRACE */
907
908 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
909 #ifndef ftrace_graph_func
910 #define ftrace_graph_func ftrace_stub
911 #define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
912 #else
913 #define FTRACE_OPS_GRAPH_STUB 0
914 #endif
915 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
916
917 /* totally disable ftrace - can not re-enable after this */
918 void ftrace_kill(void);
919
920 static inline void tracer_disable(void)
921 {
922 #ifdef CONFIG_FUNCTION_TRACER
923 ftrace_enabled = 0;
924 #endif
925 }
926
927 /*
928 * Ftrace disable/restore without lock. Some synchronization mechanism
929 * must be used to prevent ftrace_enabled from being changed between
930 * disable/restore.
931 */
932 static inline int __ftrace_enabled_save(void)
933 {
934 #ifdef CONFIG_FUNCTION_TRACER
935 int saved_ftrace_enabled = ftrace_enabled;
936 ftrace_enabled = 0;
937 return saved_ftrace_enabled;
938 #else
939 return 0;
940 #endif
941 }
942
943 static inline void __ftrace_enabled_restore(int enabled)
944 {
945 #ifdef CONFIG_FUNCTION_TRACER
946 ftrace_enabled = enabled;
947 #endif
948 }
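/*
 * Save/restore sketch; the caller provides the serialization mentioned
 * above so ftrace_enabled cannot change in between:
 *
 *	int saved = __ftrace_enabled_save();
 *
 *	... run with function tracing disabled ...
 *
 *	__ftrace_enabled_restore(saved);
 */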
949
950 /* All archs should have this, but we define it for consistency */
951 #ifndef ftrace_return_address0
952 # define ftrace_return_address0 __builtin_return_address(0)
953 #endif
954
955 /* Archs may use other ways for ADDR1 and beyond */
956 #ifndef ftrace_return_address
957 # ifdef CONFIG_FRAME_POINTER
958 # define ftrace_return_address(n) __builtin_return_address(n)
959 # else
960 # define ftrace_return_address(n) 0UL
961 # endif
962 #endif
963
964 #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
965 #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
966 #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
967 #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
968 #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
969 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
970 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
971
972 static __always_inline unsigned long get_lock_parent_ip(void)
973 {
974 unsigned long addr = CALLER_ADDR0;
975
976 if (!in_lock_functions(addr))
977 return addr;
978 addr = CALLER_ADDR1;
979 if (!in_lock_functions(addr))
980 return addr;
981 return CALLER_ADDR2;
982 }
983
984 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
985 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
986 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
987 #else
988 /*
989 * Use defines instead of static inlines because some arches will make code out
990 * of the CALLER_ADDR, when we really want these to be a real nop.
991 */
992 # define trace_preempt_on(a0, a1) do { } while (0)
993 # define trace_preempt_off(a0, a1) do { } while (0)
994 #endif
995
996 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
997 extern void ftrace_init(void);
998 #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
999 #define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
1000 #else
1001 #define FTRACE_CALLSITE_SECTION "__mcount_loc"
1002 #endif
1003 #else
1004 static inline void ftrace_init(void) { }
1005 #endif
1006
1007 /*
1008 * Structure that defines an entry function trace.
1009 * It's already packed but the attribute "packed" is needed
1010 * to remove extra padding at the end.
1011 */
1012 struct ftrace_graph_ent {
1013 unsigned long func; /* Current function */
1014 int depth;
1015 } __packed;
1016
1017 /*
1018 * Structure that defines a return function trace.
1019 * It's already packed but the attribute "packed" is needed
1020 * to remove extra padding at the end.
1021 */
1022 struct ftrace_graph_ret {
1023 unsigned long func; /* Current function */
1024 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
1025 unsigned long retval;
1026 #endif
1027 int depth;
1028 /* Number of functions that overran the depth limit for current task */
1029 unsigned int overrun;
1030 unsigned long long calltime;
1031 unsigned long long rettime;
1032 } __packed;
1033
1034 struct fgraph_ops;
1035
1036 /* Type of the callback handlers for tracing function graph */
1037 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
1038 struct fgraph_ops *); /* return */
1039 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
1040 struct fgraph_ops *); /* entry */
1041
1042 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
1043 bool ftrace_pids_enabled(struct ftrace_ops *ops);
1044
1045 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1046
1047 struct fgraph_ops {
1048 trace_func_graph_ent_t entryfunc;
1049 trace_func_graph_ret_t retfunc;
1050 struct ftrace_ops ops; /* for the hash lists */
1051 void *private;
1052 trace_func_graph_ent_t saved_func;
1053 int idx;
1054 };
1055
1056 void *fgraph_reserve_data(int idx, int size_bytes);
1057 void *fgraph_retrieve_data(int idx, int *size_bytes);
1058
1059 /*
1060 * Stack of return addresses for functions
1061 * of a thread.
1062 * Used in struct thread_info
1063 */
1064 struct ftrace_ret_stack {
1065 unsigned long ret;
1066 unsigned long func;
1067 unsigned long long calltime;
1068 #ifdef CONFIG_FUNCTION_PROFILER
1069 unsigned long long subtime;
1070 #endif
1071 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
1072 unsigned long fp;
1073 #endif
1074 unsigned long *retp;
1075 };
1076
1077 /*
1078 * Primary handler of a function return.
1079 * It relies on ftrace_return_to_handler.
1080 * Defined in entry_32/64.S
1081 */
1082 extern void return_to_handler(void);
1083
1084 extern int
1085 function_graph_enter(unsigned long ret, unsigned long func,
1086 unsigned long frame_pointer, unsigned long *retp);
1087
1088 struct ftrace_ret_stack *
1089 ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
1090
1091 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
1092 unsigned long ret, unsigned long *retp);
1093 unsigned long *fgraph_get_task_var(struct fgraph_ops *gops);
1094
1095 /*
1096 * Sometimes we don't want to trace a function with the function
1097 * graph tracer but we still want it traced by the usual function
1098 * tracer if the function graph tracer is not configured.
1099 */
1100 #define __notrace_funcgraph notrace
1101
1102 #define FTRACE_RETFUNC_DEPTH 50
1103 #define FTRACE_RETSTACK_ALLOC_SIZE 32
1104
1105 extern int register_ftrace_graph(struct fgraph_ops *ops);
1106 extern void unregister_ftrace_graph(struct fgraph_ops *ops);
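/*
 * Function graph registration sketch (handler names are hypothetical):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops)
 *	{
 *		return 1;	// nonzero: trace this function's return
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace,
 *			      struct fgraph_ops *gops)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 */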
1107
1108 /**
1109 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
1110 *
1111 * ftrace_graph_stop() is called when a severe error is detected in
1112 * the function graph tracing. This function is called by the critical
1113 * paths of function graph to keep those paths from doing any more harm.
1114 */
1115 DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
1116
1117 static inline bool ftrace_graph_is_dead(void)
1118 {
1119 return static_branch_unlikely(&kill_ftrace_graph);
1120 }
1121
1122 extern void ftrace_graph_stop(void);
1123
1124 /* The current handlers in use */
1125 extern trace_func_graph_ret_t ftrace_graph_return;
1126 extern trace_func_graph_ent_t ftrace_graph_entry;
1127
1128 extern void ftrace_graph_init_task(struct task_struct *t);
1129 extern void ftrace_graph_exit_task(struct task_struct *t);
1130 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
1131
1132 /* Used by assembly, but to quiet sparse warnings */
1133 extern struct ftrace_ops *function_trace_op;
1134
1135 static inline void pause_graph_tracing(void)
1136 {
1137 atomic_inc(&current->tracing_graph_pause);
1138 }
1139
1140 static inline void unpause_graph_tracing(void)
1141 {
1142 atomic_dec(&current->tracing_graph_pause);
1143 }
1144 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
1145
1146 #define __notrace_funcgraph
1147
1148 static inline void ftrace_graph_init_task(struct task_struct *t) { }
1149 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
1150 static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
1151
1152 /* Define as macros as fgraph_ops may not be defined */
1153 #define register_ftrace_graph(ops) ({ -1; })
1154 #define unregister_ftrace_graph(ops) do { } while (0)
1155
1156 static inline unsigned long
1157 ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
1158 unsigned long *retp)
1159 {
1160 return ret;
1161 }
1162
1163 static inline void pause_graph_tracing(void) { }
1164 static inline void unpause_graph_tracing(void) { }
1165 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1166
1167 #ifdef CONFIG_TRACING
1168 enum ftrace_dump_mode;
1169
1170 #define MAX_TRACER_SIZE 100
1171 extern char ftrace_dump_on_oops[];
1172 extern int ftrace_dump_on_oops_enabled(void);
1173 extern int tracepoint_printk;
1174
1175 extern void disable_trace_on_warning(void);
1176 extern int __disable_trace_on_warning;
1177
1178 int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
1179 void *buffer, size_t *lenp, loff_t *ppos);
1180
1181 #else /* CONFIG_TRACING */
1182 static inline void disable_trace_on_warning(void) { }
1183 #endif /* CONFIG_TRACING */
1184
1185 #ifdef CONFIG_FTRACE_SYSCALLS
1186
1187 unsigned long arch_syscall_addr(int nr);
1188
1189 #endif /* CONFIG_FTRACE_SYSCALLS */
1190
1191 #endif /* _LINUX_FTRACE_H */
1192