xref: /linux/include/linux/ftrace.h (revision cbba5d1b53fb82209feacb459edecb1ef8427119)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Ftrace header.  For implementation details beyond the random comments
4  * scattered below, see: Documentation/trace/ftrace-design.rst
5  */
6 
7 #ifndef _LINUX_FTRACE_H
8 #define _LINUX_FTRACE_H
9 
10 #include <linux/trace_recursion.h>
11 #include <linux/trace_clock.h>
12 #include <linux/jump_label.h>
13 #include <linux/kallsyms.h>
14 #include <linux/linkage.h>
15 #include <linux/bitops.h>
16 #include <linux/ptrace.h>
17 #include <linux/ktime.h>
18 #include <linux/sched.h>
19 #include <linux/types.h>
20 #include <linux/init.h>
21 #include <linux/fs.h>
22 
23 #include <asm/ftrace.h>
24 
25 /*
26  * If the arch supports passing the variable contents of
27  * function_trace_op as the third parameter back from the
28  * mcount call, then the arch should define this as 1.
29  */
30 #ifndef ARCH_SUPPORTS_FTRACE_OPS
31 #define ARCH_SUPPORTS_FTRACE_OPS 0
32 #endif
33 
34 #ifdef CONFIG_TRACING
35 extern void ftrace_boot_snapshot(void);
36 #else
37 static inline void ftrace_boot_snapshot(void) { }
38 #endif
39 
40 struct ftrace_ops;
41 struct ftrace_regs;
42 struct dyn_ftrace;
43 
44 char *arch_ftrace_match_adjust(char *str, const char *search);
45 
46 #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
47 unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
48 #else
49 unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
50 #endif
51 
52 #ifdef CONFIG_FUNCTION_TRACER
53 /*
54  * If the arch's mcount caller does not support all of ftrace's
55  * features, then it must call an indirect function that
56  * does. Or at least does enough to prevent any unwelcome side effects.
57  *
58  * Also define the function prototype that these architectures use
59  * to call the ftrace_ops_list_func().
60  */
61 #if !ARCH_SUPPORTS_FTRACE_OPS
62 # define FTRACE_FORCE_LIST_FUNC 1
63 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
64 #else
65 # define FTRACE_FORCE_LIST_FUNC 0
66 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
67 			       struct ftrace_ops *op, struct ftrace_regs *fregs);
68 #endif
69 extern const struct ftrace_ops ftrace_nop_ops;
70 extern const struct ftrace_ops ftrace_list_ops;
71 struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
72 #endif /* CONFIG_FUNCTION_TRACER */
73 
74 /* Main tracing buffer and events set up */
75 #ifdef CONFIG_TRACING
76 void trace_init(void);
77 void early_trace_init(void);
78 #else
79 static inline void trace_init(void) { }
80 static inline void early_trace_init(void) { }
81 #endif
82 
83 struct module;
84 struct ftrace_hash;
85 
86 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
87 	defined(CONFIG_DYNAMIC_FTRACE)
88 int
89 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
90 		   unsigned long *off, char **modname, char *sym);
91 #else
92 static inline int
93 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
94 		   unsigned long *off, char **modname, char *sym)
95 {
96 	return 0;
97 }
98 #endif
99 
100 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
101 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
102 			   char *type, char *name,
103 			   char *module_name, int *exported);
104 #else
105 static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
106 					 char *type, char *name,
107 					 char *module_name, int *exported)
108 {
109 	return -1;
110 }
111 #endif
112 
113 #ifdef CONFIG_FUNCTION_TRACER
114 
115 #include <linux/ftrace_regs.h>
116 
117 extern int ftrace_enabled;
118 
119 /**
120  * ftrace_regs - ftrace partial/optimal register set
121  *
122  * ftrace_regs represents a group of registers which is used at the
123  * function entry and exit. There are three types of registers.
124  *
125  * - Registers for passing the parameters to the callee, including the stack
126  *   pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64)
127  * - Registers for passing the return values to the caller.
128  *   (e.g. rax and rdx on x86_64)
129  * - Registers for hooking the function call and return including the
130  *   frame pointer (the frame pointer is architecture/config dependent)
131  *   (e.g. rip, rbp and rsp for x86_64)
132  *
133  * Also, architecture dependent fields can be used for internal processing.
134  * (e.g. orig_ax on x86_64)
135  *
136  * Basically, ftrace_regs stores the registers related to the context.
137  * On function entry, registers for function parameters and hooking the
138  * function call are stored, and on function exit, registers for function
139  * return value and frame pointers are stored.
140  *
141  * Also, which registers are restored from the ftrace_regs depends on
142  * the context.
143  * On function entry, those registers will be restored except for
144  * the stack pointer, so that the user can change the function parameters
145  * and the instruction pointer (e.g. for live patching.)
146  * On function exit, only the registers used for the return values
147  * are restored.
148  *
149  * NOTE: users *must not* access regs directly; only do it via the APIs,
150  * because the members can change according to the architecture.
151  * This is why the structure is empty here, so that nothing accesses
152  * the ftrace_regs directly.
153  */
154 struct ftrace_regs {
155 	/* Nothing to see here, use the accessor functions! */
156 };
157 
158 #define ftrace_regs_size()	sizeof(struct __arch_ftrace_regs)
159 
160 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
161 /*
162  * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own
163  * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully
164  * populated. It should return NULL otherwise.
165  */
166 static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
167 {
168 	return &arch_ftrace_regs(fregs)->regs;
169 }
170 
171 /*
172  * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
173  * to allow setting of the instruction pointer from the ftrace_regs when
174  * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
175  */
176 #define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
177 #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
178 
179 #ifdef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS
180 
181 static_assert(sizeof(struct pt_regs) == ftrace_regs_size());
182 
183 #endif /* CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
184 
185 static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
186 {
187 	if (!fregs)
188 		return NULL;
189 
190 	return arch_ftrace_get_regs(fregs);
191 }
192 
193 #if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
194 	defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS)
195 
196 #ifndef arch_ftrace_partial_regs
197 #define arch_ftrace_partial_regs(regs) do {} while (0)
198 #endif
199 
200 static __always_inline struct pt_regs *
201 ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
202 {
203 	/*
204 	 * If CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS=y, the ftrace_regs memory
205 	 * layout includes pt_regs, so always return that address.
206 	 * Since arch_ftrace_get_regs() will check some members and may return
207 	 * NULL, we can not use it here.
208 	 */
209 	regs = &arch_ftrace_regs(fregs)->regs;
210 
211 	/* Allow arch specific updates to regs. */
212 	arch_ftrace_partial_regs(regs);
213 	return regs;
214 }
215 
216 #endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
217 
218 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
219 
220 /*
221  * Please define an arch dependent pt_regs which is compatible with
222  * perf_arch_fetch_caller_regs(), but based on ftrace_regs.
223  * This requires:
224  *   - user_mode(_regs) returns false (always kernel mode).
225  *   - the _regs is usable for a stack trace.
226  */
227 #ifndef arch_ftrace_fill_perf_regs
228 /* Same as perf_arch_fetch_caller_regs(): do nothing by default */
229 #define arch_ftrace_fill_perf_regs(fregs, _regs) do {} while (0)
230 #endif
231 
232 static __always_inline struct pt_regs *
233 ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
234 {
235 	arch_ftrace_fill_perf_regs(fregs, regs);
236 	return regs;
237 }
238 
239 #else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
240 
241 static __always_inline struct pt_regs *
242 ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
243 {
244 	return &arch_ftrace_regs(fregs)->regs;
245 }
246 
247 #endif
248 
249 /*
250  * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
251  * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
252  */
253 static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
254 {
255 	if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
256 		return true;
257 
258 	return ftrace_get_regs(fregs) != NULL;
259 }
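
/*
 * Illustrative sketch (editor's example, compiled out): how a callback can
 * probe the register state it was handed.  ftrace_get_regs() only returns a
 * pt_regs when a full register set was saved (FTRACE_OPS_FL_SAVE_REGS), while
 * ftrace_regs_has_args() is true whenever the argument/return registers in
 * fregs are valid.  The name my_callback is hypothetical.
 */
#if 0
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs) {
		/* A full pt_regs was saved; any of it may be read. */
	} else if (ftrace_regs_has_args(fregs)) {
		/* Only the ftrace_regs_{get,set}_*() accessors may be used. */
	}
}
#endif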
260 
261 #ifdef CONFIG_HAVE_REGS_AND_STACK_ACCESS_API
262 static __always_inline unsigned long
263 ftrace_regs_get_kernel_stack_nth(struct ftrace_regs *fregs, unsigned int nth)
264 {
265 	unsigned long *stackp;
266 
267 	stackp = (unsigned long *)ftrace_regs_get_stack_pointer(fregs);
268 	if (((unsigned long)(stackp + nth) & ~(THREAD_SIZE - 1)) ==
269 	    ((unsigned long)stackp & ~(THREAD_SIZE - 1)))
270 		return *(stackp + nth);
271 
272 	return 0;
273 }
274 #else /* !CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
275 #define ftrace_regs_get_kernel_stack_nth(fregs, nth)	(0L)
276 #endif /* CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
277 
278 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
279 			      struct ftrace_ops *op, struct ftrace_regs *fregs);
280 
281 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
282 
283 /*
284  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
285  * set in the flags member.
286  * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
287  * IPMODIFY are attribute flags which can be set only before
288  * registering the ftrace_ops, and can not be modified while it is registered.
289  * Changing those attribute flags after registering ftrace_ops will
290  * cause unexpected results.
291  *
292  * ENABLED - set/unset when ftrace_ops is registered/unregistered
293  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
294  *           allocated ftrace_ops which need special care
295  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
296  *            and passed to the callback. If this flag is set, but the
297  *            architecture does not support passing regs
298  *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
299  *            ftrace_ops will fail to register, unless the next flag
300  *            is set.
301  * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
302  *            handler can handle an arch that does not save regs
303  *            (the handler tests if regs == NULL), then it can set
304  *            this flag instead. It will not fail registering the ftrace_ops,
305  *            but the regs field will be NULL if the arch does not support
306  *            passing regs to the handler.
307  *            Note, if this flag is set, the SAVE_REGS flag will automatically
308  *            get set upon registering the ftrace_ops, if the arch supports it.
309  * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
310  *            that the callback needs recursion protection. If it does
311  *            not set this, then the ftrace infrastructure will assume
312  *            that the callback can handle recursion on its own.
313  * STUB   - The ftrace_ops is just a placeholder.
314  * INITIALIZED - The ftrace_ops has already been initialized (the first time
315  *            register_ftrace_function() is called, it will initialize the ops)
316  * DELETED - The ops are being deleted, do not let them be registered again.
317  * ADDING  - The ops is in the process of being added.
318  * REMOVING - The ops is in the process of being removed.
319  * MODIFYING - The ops is in the process of changing its filter functions.
320  * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
321  *            The arch specific code sets this flag when it allocated a
322  *            trampoline. This lets the arch know that it can update the
323  *            trampoline in case the callback function changes.
324  *            The ftrace_ops trampoline can be set by the ftrace users, and
325  *            in such cases the arch must not modify it. Only the arch ftrace
326  *            core code should set this flag.
327  * IPMODIFY - The ops can modify the IP register. This can only be set with
328  *            SAVE_REGS. If another ops with this flag set is already registered
329  *            for any of the functions that this ops will be registered for, then
330  *            this ops will fail to register or set_filter_ip.
331  * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
332  * RCU     - Set when the ops can only be called when RCU is watching.
333  * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
334  * PERMANENT - Set when the ops is permanent and should not be affected by
335  *             ftrace_enabled.
336  * DIRECT - Used by the direct ftrace_ops helper for direct functions
337  *            (internal ftrace only, should not be used by others)
338  * SUBOP  - Is controlled by another op in field managed.
339  * GRAPH  - Is a component of the fgraph_ops structure
340  */
341 enum {
342 	FTRACE_OPS_FL_ENABLED			= BIT(0),
343 	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
344 	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
345 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
346 	FTRACE_OPS_FL_RECURSION			= BIT(4),
347 	FTRACE_OPS_FL_STUB			= BIT(5),
348 	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
349 	FTRACE_OPS_FL_DELETED			= BIT(7),
350 	FTRACE_OPS_FL_ADDING			= BIT(8),
351 	FTRACE_OPS_FL_REMOVING			= BIT(9),
352 	FTRACE_OPS_FL_MODIFYING			= BIT(10),
353 	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
354 	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
355 	FTRACE_OPS_FL_PID			= BIT(13),
356 	FTRACE_OPS_FL_RCU			= BIT(14),
357 	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
358 	FTRACE_OPS_FL_PERMANENT                 = BIT(16),
359 	FTRACE_OPS_FL_DIRECT			= BIT(17),
360 	FTRACE_OPS_FL_SUBOP			= BIT(18),
361 	FTRACE_OPS_FL_GRAPH			= BIT(19),
362 };
363 
364 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
365 #define FTRACE_OPS_FL_SAVE_ARGS                        FTRACE_OPS_FL_SAVE_REGS
366 #else
367 #define FTRACE_OPS_FL_SAVE_ARGS                        0
368 #endif
369 
370 /*
371  * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
372  * to a ftrace_ops. Note, the requests may fail.
373  *
374  * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
375  *                              function as an ops with IPMODIFY. Called
376  *                              when the DIRECT ops is being registered.
377  *                              This is called with both direct_mutex and
378  *                              ftrace_lock held.
379  *
380  * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
381  *                              function as an ops with IPMODIFY. Called
382  *                              when the other ops (the one with IPMODIFY)
383  *                              is being registered.
384  *                              This is called with direct_mutex locked.
385  *
386  * DISABLE_SHARE_IPMODIFY_PEER - stop a DIRECT ops from working on the same
387  *                               function as an ops with IPMODIFY. Called
388  *                               when the other ops (the one with IPMODIFY)
389  *                               is being unregistered.
390  *                               This is called with direct_mutex locked.
391  */
392 enum ftrace_ops_cmd {
393 	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
394 	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
395 	FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
396 };
397 
398 /*
399  * For most ftrace_ops_cmd,
400  * Returns:
401  *        0 - Success.
402  *        Negative on failure. The return value is dependent on the
403  *        callback.
404  */
405 typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
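
/*
 * Illustrative sketch (editor's example, compiled out): an ops_func for a
 * DIRECT ops that is willing to share functions with an IPMODIFY user.
 * Returning a negative value refuses the request; my_ops_func is hypothetical.
 */
#if 0
static int my_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
{
	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF:
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		/* Switch to a trampoline that honours a modified IP. */
		return 0;
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		/* The IPMODIFY peer is gone; the plain trampoline is fine. */
		return 0;
	}
	return -EINVAL;
}
#endif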
406 
407 #ifdef CONFIG_DYNAMIC_FTRACE
408 /* The hashes used to know which functions the callbacks trace */
409 struct ftrace_ops_hash {
410 	struct ftrace_hash __rcu	*notrace_hash;
411 	struct ftrace_hash __rcu	*filter_hash;
412 	struct mutex			regex_lock;
413 };
414 
415 void ftrace_free_init_mem(void);
416 void ftrace_free_mem(struct module *mod, void *start, void *end);
417 #else
418 static inline void ftrace_free_init_mem(void)
419 {
420 	ftrace_boot_snapshot();
421 }
422 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
423 #endif
424 
425 /*
426  * Note, ftrace_ops can be referenced outside of RCU protection, unless
427  * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
428  * core data, the unregistering of it will perform a scheduling on all CPUs
429  * to make sure that there are no more users. Depending on the load of the
430  * system that may take a bit of time.
431  *
432  * Care must also be taken not to free any private data too early; if private
433  * data is added to a ftrace_ops that is in core code, the user of the
434  * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
435  */
436 struct ftrace_ops {
437 	ftrace_func_t			func;
438 	struct ftrace_ops __rcu		*next;
439 	unsigned long			flags;
440 	void				*private;
441 	ftrace_func_t			saved_func;
442 #ifdef CONFIG_DYNAMIC_FTRACE
443 	struct ftrace_ops_hash		local_hash;
444 	struct ftrace_ops_hash		*func_hash;
445 	struct ftrace_ops_hash		old_hash;
446 	unsigned long			trampoline;
447 	unsigned long			trampoline_size;
448 	struct list_head		list;
449 	struct list_head		subop_list;
450 	ftrace_ops_func_t		ops_func;
451 	struct ftrace_ops		*managed;
452 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
453 	unsigned long			direct_call;
454 #endif
455 #endif
456 };
457 
458 extern struct ftrace_ops __rcu *ftrace_ops_list;
459 extern struct ftrace_ops ftrace_list_end;
460 
461 /*
462  * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
463  * can use rcu_dereference_raw_check() is that elements removed from this list
464  * are simply leaked, so there is no need to interact with a grace-period
465  * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
466  * concurrent insertions into the ftrace_ops_list.
467  *
468  * Silly Alpha and silly pointer-speculation compiler optimizations!
469  */
470 #define do_for_each_ftrace_op(op, list)			\
471 	op = rcu_dereference_raw_check(list);			\
472 	do
473 
474 /*
475  * Optimized for just a single item in the list (as that is the normal case).
476  */
477 #define while_for_each_ftrace_op(op)				\
478 	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
479 	       unlikely((op) != &ftrace_list_end))
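
/*
 * Illustrative sketch (editor's example, compiled out): how the two macros
 * above pair up when walking the registered ops, as the core list function
 * does.  The helper name call_all_ops is hypothetical.
 */
#if 0
static void call_all_ops(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_regs *fregs)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		op->func(ip, parent_ip, op, fregs);
	} while_for_each_ftrace_op(op);
}
#endif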
480 
481 /*
482  * Type of the current tracing.
483  */
484 enum ftrace_tracing_type_t {
485 	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
486 	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
487 };
488 
489 /* Current tracing type, default is FTRACE_TYPE_ENTER */
490 extern enum ftrace_tracing_type_t ftrace_tracing_type;
491 
492 /*
493  * The ftrace_ops must be static and should also
494  * be read_mostly.  These functions do modify read_mostly variables
495  * so use them sparingly. Never free an ftrace_ops or modify its
496  * next pointer after it has been registered. Even after unregistering
497  * it, the next pointer may still be used internally.
498  */
499 int register_ftrace_function(struct ftrace_ops *ops);
500 int unregister_ftrace_function(struct ftrace_ops *ops);
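
/*
 * Illustrative sketch (editor's example, compiled out): the minimal life
 * cycle of a function trace callback.  The ops is static, as required by the
 * comment above; all my_* names are hypothetical.
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/*
	 * Called for every traced function.  FTRACE_OPS_FL_RECURSION below
	 * asks the core to provide recursion protection for this callback.
	 */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION,
};

static int __init my_init(void)
{
	/* With no filter set, this traces every function in the kernel. */
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
#endif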
501 
502 extern void ftrace_stub(unsigned long a0, unsigned long a1,
503 			struct ftrace_ops *op, struct ftrace_regs *fregs);
504 
505 
506 int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
507 #else /* !CONFIG_FUNCTION_TRACER */
508 /*
509  * (un)register_ftrace_function must be a macro since the ops parameter
510  * must not be evaluated.
511  */
512 #define register_ftrace_function(ops) ({ 0; })
513 #define unregister_ftrace_function(ops) ({ 0; })
514 static inline void ftrace_kill(void) { }
515 static inline void ftrace_free_init_mem(void) { }
516 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
517 static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
518 {
519 	return -EOPNOTSUPP;
520 }
521 #endif /* CONFIG_FUNCTION_TRACER */
522 
523 struct ftrace_func_entry {
524 	struct hlist_node hlist;
525 	unsigned long ip;
526 	unsigned long direct; /* for direct lookup only */
527 };
528 
529 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
530 unsigned long ftrace_find_rec_direct(unsigned long ip);
531 int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
532 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
533 			     bool free_filters);
534 int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
535 int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
536 
537 void ftrace_stub_direct_tramp(void);
538 
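
/*
 * Illustrative sketch (editor's example, compiled out), loosely modelled on
 * samples/ftrace/ftrace-direct*.c: my_tramp would be an arch-specific
 * assembly trampoline and my_func the function being hooked; both are
 * hypothetical.  ftrace_set_filter_ip() is declared further down in this
 * header, under CONFIG_DYNAMIC_FTRACE.
 */
#if 0
extern void my_func(void);
extern void my_tramp(void);	/* asm stub that calls the real handler */

static struct ftrace_ops direct_ops;

static int __init my_direct_init(void)
{
	int ret;

	ret = ftrace_set_filter_ip(&direct_ops, (unsigned long)my_func, 0, 0);
	if (ret)
		return ret;

	return register_ftrace_direct(&direct_ops, (unsigned long)my_tramp);
}

static void __exit my_direct_exit(void)
{
	unregister_ftrace_direct(&direct_ops, (unsigned long)my_tramp, true);
}
#endif
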
539 #else
540 struct ftrace_ops;
541 static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
542 {
543 	return 0;
544 }
545 static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
546 {
547 	return -ENODEV;
548 }
549 static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
550 					   bool free_filters)
551 {
552 	return -ENODEV;
553 }
554 static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
555 {
556 	return -ENODEV;
557 }
558 static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
559 {
560 	return -ENODEV;
561 }
562 
563 /*
564  * This must be implemented by the architecture.
565  * It is the way the ftrace direct_ops helper, when called
566  * via ftrace (because there are other callbacks besides the
567  * direct call), can inform the architecture's trampoline that this
568  * routine has a direct caller, and what the caller is.
569  *
570  * For example, in x86, it returns the direct caller
571  * callback function via the regs->orig_ax parameter.
572  * Then in the ftrace trampoline, if this is set, it makes
573  * the return from the trampoline jump to the direct caller
574  * instead of going back to the function it just traced.
575  */
576 static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
577 						 unsigned long addr) { }
578 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
579 
580 #ifdef CONFIG_STACK_TRACER
581 
582 int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
583 		       size_t *lenp, loff_t *ppos);
584 
585 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
586 DECLARE_PER_CPU(int, disable_stack_tracer);
587 
588 /**
589  * stack_tracer_disable - temporarily disable the stack tracer
590  *
591  * There are a few locations (namely in RCU) where stack tracing
592  * cannot be executed. This function is used to disable stack
593  * tracing during those critical sections.
594  *
595  * This function must be called with preemption or interrupts
596  * disabled and stack_tracer_enable() must be called shortly after
597  * while preemption or interrupts are still disabled.
598  */
599 static inline void stack_tracer_disable(void)
600 {
601 	/* Preemption or interrupts must be disabled */
602 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
603 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
604 	this_cpu_inc(disable_stack_tracer);
605 }
606 
607 /**
608  * stack_tracer_enable - re-enable the stack tracer
609  *
610  * After stack_tracer_disable() is called, stack_tracer_enable()
611  * must be called shortly afterward.
612  */
613 static inline void stack_tracer_enable(void)
614 {
615 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
616 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
617 	this_cpu_dec(disable_stack_tracer);
618 }
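
/*
 * Illustrative sketch (editor's example, compiled out): the pair must nest
 * inside a non-preemptible (or irqs-off) region, for instance:
 */
#if 0
static void my_critical_section(void)
{
	preempt_disable_notrace();
	stack_tracer_disable();

	/* ... work that must not be sampled by the stack tracer ... */

	stack_tracer_enable();
	preempt_enable_notrace();
}
#endif
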
619 #else
620 static inline void stack_tracer_disable(void) { }
621 static inline void stack_tracer_enable(void) { }
622 #endif
623 
624 enum {
625 	FTRACE_UPDATE_CALLS		= (1 << 0),
626 	FTRACE_DISABLE_CALLS		= (1 << 1),
627 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
628 	FTRACE_START_FUNC_RET		= (1 << 3),
629 	FTRACE_STOP_FUNC_RET		= (1 << 4),
630 	FTRACE_MAY_SLEEP		= (1 << 5),
631 };
632 
633 /* Arches can override ftrace_get_symaddr() to convert fentry_ip to symaddr. */
634 #ifndef ftrace_get_symaddr
635 /**
636  * ftrace_get_symaddr - return the symbol address from fentry_ip
637  * @fentry_ip: the address of ftrace location
638  *
639  * Get the symbol address from @fentry_ip (fast path). If there is no fast
640  * search path, this returns 0.
641  * The user may need to use the kallsyms API to find the symbol address.
642  */
643 #define ftrace_get_symaddr(fentry_ip) (0)
644 #endif
645 
646 void ftrace_sync_ipi(void *data);
647 
648 #ifdef CONFIG_DYNAMIC_FTRACE
649 
650 void ftrace_arch_code_modify_prepare(void);
651 void ftrace_arch_code_modify_post_process(void);
652 
653 enum ftrace_bug_type {
654 	FTRACE_BUG_UNKNOWN,
655 	FTRACE_BUG_INIT,
656 	FTRACE_BUG_NOP,
657 	FTRACE_BUG_CALL,
658 	FTRACE_BUG_UPDATE,
659 };
660 extern enum ftrace_bug_type ftrace_bug_type;
661 
662 /*
663  * Archs can set this to point to a variable that holds the value that was
664  * expected at the call site before calling ftrace_bug().
665  */
666 extern const void *ftrace_expected;
667 
668 void ftrace_bug(int err, struct dyn_ftrace *rec);
669 
670 struct seq_file;
671 
672 extern int ftrace_text_reserved(const void *start, const void *end);
673 
674 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
675 
676 bool is_ftrace_trampoline(unsigned long addr);
677 
678 /*
679  * The dyn_ftrace record's flags field is split into two parts.
680  * The first part, which is '0-FTRACE_REF_MAX', is a counter of
681  * the number of callbacks that have registered the function that
682  * the dyn_ftrace descriptor represents.
683  *
684  * The second part is a mask:
685  *  ENABLED - the function is being traced
686  *  REGS    - the record wants the function to save regs
687  *  REGS_EN - the function is set up to save regs.
688  *  IPMODIFY - the record allows for the IP address to be changed.
689  *  DISABLED - the record is not ready to be touched yet
690  *  DIRECT   - there is a direct function to call
691  *  CALL_OPS - the record can use callsite-specific ops
692  *  CALL_OPS_EN - the function is set up to use callsite-specific ops
693  *  TOUCHED  - A callback was added since boot up
694  *  MODIFIED - The function had IPMODIFY or DIRECT attached to it
695  *
696  * When a new ftrace_ops is registered and wants a function to save
697  * pt_regs, the rec->flags REGS is set. When the function has been
698  * set up to save regs, the REGS_EN flag is set. Once a function
699  * starts saving regs it will do so until all ftrace_ops are removed
700  * from tracing that function.
701  */
702 enum {
703 	FTRACE_FL_ENABLED	= (1UL << 31),
704 	FTRACE_FL_REGS		= (1UL << 30),
705 	FTRACE_FL_REGS_EN	= (1UL << 29),
706 	FTRACE_FL_TRAMP		= (1UL << 28),
707 	FTRACE_FL_TRAMP_EN	= (1UL << 27),
708 	FTRACE_FL_IPMODIFY	= (1UL << 26),
709 	FTRACE_FL_DISABLED	= (1UL << 25),
710 	FTRACE_FL_DIRECT	= (1UL << 24),
711 	FTRACE_FL_DIRECT_EN	= (1UL << 23),
712 	FTRACE_FL_CALL_OPS	= (1UL << 22),
713 	FTRACE_FL_CALL_OPS_EN	= (1UL << 21),
714 	FTRACE_FL_TOUCHED	= (1UL << 20),
715 	FTRACE_FL_MODIFIED	= (1UL << 19),
716 };
717 
718 #define FTRACE_REF_MAX_SHIFT	19
719 #define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)
720 
721 #define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)
722 
723 struct dyn_ftrace {
724 	unsigned long		ip; /* address of mcount call-site */
725 	unsigned long		flags;
726 	struct dyn_arch_ftrace	arch;
727 };
728 
729 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
730 			 int remove, int reset);
731 int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
732 			  unsigned int cnt, int remove, int reset);
733 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
734 		       int len, int reset);
735 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
736 			int len, int reset);
737 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
738 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
739 void ftrace_free_filter(struct ftrace_ops *ops);
740 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
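
/*
 * Illustrative sketch (editor's example, compiled out): restricting an ops to
 * a single function before registering it.  "schedule" is just an example
 * pattern and my_ops is the hypothetical static ops sketched earlier.
 */
#if 0
static int __init my_filtered_init(void)
{
	int ret;

	/* reset=1 throws away any previously set filter. */
	ret = ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}
#endif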
741 
742 /*
743  * The FTRACE_UPDATE_* enum is used to pass information back
744  * from the ftrace_update_record() and ftrace_test_record()
745  * functions. These are called by the code update routines
746  * to find out what is to be done for a given function.
747  *
748  *  IGNORE           - The function is already what we want it to be
749  *  MAKE_CALL        - Start tracing the function
750  *  MODIFY_CALL      - Stop saving regs for the function
751  *  MAKE_NOP         - Stop tracing the function
752  */
753 enum {
754 	FTRACE_UPDATE_IGNORE,
755 	FTRACE_UPDATE_MAKE_CALL,
756 	FTRACE_UPDATE_MODIFY_CALL,
757 	FTRACE_UPDATE_MAKE_NOP,
758 };
759 
760 enum {
761 	FTRACE_ITER_FILTER	= (1 << 0),
762 	FTRACE_ITER_NOTRACE	= (1 << 1),
763 	FTRACE_ITER_PRINTALL	= (1 << 2),
764 	FTRACE_ITER_DO_PROBES	= (1 << 3),
765 	FTRACE_ITER_PROBE	= (1 << 4),
766 	FTRACE_ITER_MOD		= (1 << 5),
767 	FTRACE_ITER_ENABLED	= (1 << 6),
768 	FTRACE_ITER_TOUCHED	= (1 << 7),
769 	FTRACE_ITER_ADDRS	= (1 << 8),
770 };
771 
772 void arch_ftrace_update_code(int command);
773 void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
774 void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
775 void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
776 
777 struct ftrace_rec_iter;
778 
779 struct ftrace_rec_iter *ftrace_rec_iter_start(void);
780 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
781 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
782 
783 #define for_ftrace_rec_iter(iter)		\
784 	for (iter = ftrace_rec_iter_start();	\
785 	     iter;				\
786 	     iter = ftrace_rec_iter_next(iter))
787 
788 
789 int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
790 int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
791 void ftrace_run_stop_machine(int command);
792 unsigned long ftrace_location(unsigned long ip);
793 unsigned long ftrace_location_range(unsigned long start, unsigned long end);
794 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
795 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
796 
797 extern ftrace_func_t ftrace_trace_function;
798 
799 int ftrace_regex_open(struct ftrace_ops *ops, int flag,
800 		  struct inode *inode, struct file *file);
801 ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
802 			    size_t cnt, loff_t *ppos);
803 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
804 			     size_t cnt, loff_t *ppos);
805 int ftrace_regex_release(struct inode *inode, struct file *file);
806 
807 void __init
808 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
809 
810 /* defined in arch */
811 extern int ftrace_dyn_arch_init(void);
812 extern void ftrace_replace_code(int enable);
813 extern int ftrace_update_ftrace_func(ftrace_func_t func);
814 extern void ftrace_caller(void);
815 extern void ftrace_regs_caller(void);
816 extern void ftrace_call(void);
817 extern void ftrace_regs_call(void);
818 extern void mcount_call(void);
819 
820 void ftrace_modify_all_code(int command);
821 
822 #ifndef FTRACE_ADDR
823 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
824 #endif
825 
826 #ifndef FTRACE_GRAPH_ADDR
827 #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
828 #endif
829 
830 #ifndef FTRACE_REGS_ADDR
831 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
832 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
833 #else
834 # define FTRACE_REGS_ADDR FTRACE_ADDR
835 #endif
836 #endif
837 
838 /*
839  * If an arch would like functions that are only traced
840  * by the function graph tracer to jump directly to its own
841  * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
842  * to be that address to jump to.
843  */
844 #ifndef FTRACE_GRAPH_TRAMP_ADDR
845 #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
846 #endif
847 
848 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
849 extern void ftrace_graph_caller(void);
850 extern int ftrace_enable_ftrace_graph_caller(void);
851 extern int ftrace_disable_ftrace_graph_caller(void);
852 #else
853 static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
854 static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
855 #endif
856 
857 /**
858  * ftrace_make_nop - convert code into nop
859  * @mod: module structure if called by module load initialization
860  * @rec: the call site record (e.g. mcount/fentry)
861  * @addr: the address that the call site should be calling
862  *
863  * This is a very sensitive operation and great care needs
864  * to be taken by the arch.  The operation should carefully
865  * read the location, check to see if what is read is indeed
866  * what we expect it to be, and then on success of the compare,
867  * it should write to the location.
868  *
869  * The code segment at @rec->ip should be a caller to @addr
870  *
871  * Return must be:
872  *  0 on success
873  *  -EFAULT on error reading the location
874  *  -EINVAL on a failed compare of the contents
875  *  -EPERM  on error writing to the location
876  * Any other value will be considered a failure.
877  */
878 extern int ftrace_make_nop(struct module *mod,
879 			   struct dyn_ftrace *rec, unsigned long addr);
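
/*
 * Illustrative sketch (editor's example, compiled out) of the read/compare/
 * write pattern described above.  build_call_insn(), build_nop_insn(),
 * arch_read_insn() and arch_write_insn() are hypothetical helpers, not real
 * arch interfaces; MCOUNT_INSN_SIZE comes from <asm/ftrace.h> on arches that
 * define it.
 */
#if 0
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char expect[MCOUNT_INSN_SIZE], nop[MCOUNT_INSN_SIZE];
	unsigned char cur[MCOUNT_INSN_SIZE];

	build_call_insn(expect, rec->ip, addr);	/* what should be there now */
	build_nop_insn(nop);

	if (arch_read_insn(rec->ip, cur))
		return -EFAULT;			/* could not read the site */
	if (memcmp(cur, expect, MCOUNT_INSN_SIZE))
		return -EINVAL;			/* unexpected contents */
	if (arch_write_insn(rec->ip, nop))
		return -EPERM;			/* could not patch the site */
	return 0;
}
#endif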
880 
881 /**
882  * ftrace_need_init_nop - return whether nop call sites should be initialized
883  *
884  * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
885  * need to call ftrace_init_nop() if the code is built with that flag.
886  * Architectures where this is not always the case may define their own
887  * condition.
888  *
889  * Return must be:
890  *  0	    if ftrace_init_nop() should be called
891  *  Nonzero if ftrace_init_nop() should not be called
892  */
893 
894 #ifndef ftrace_need_init_nop
895 #define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
896 #endif
897 
898 /**
899  * ftrace_init_nop - initialize a nop call site
900  * @mod: module structure if called by module load initialization
901  * @rec: the call site record (e.g. mcount/fentry)
902  *
903  * This is a very sensitive operation and great care needs
904  * to be taken by the arch.  The operation should carefully
905  * read the location, check to see if what is read is indeed
906  * what we expect it to be, and then on success of the compare,
907  * it should write to the location.
908  *
909  * The code segment at @rec->ip should contain the contents created by
910  * the compiler
911  *
912  * Return must be:
913  *  0 on success
914  *  -EFAULT on error reading the location
915  *  -EINVAL on a failed compare of the contents
916  *  -EPERM  on error writing to the location
917  * Any other value will be considered a failure.
918  */
919 #ifndef ftrace_init_nop
920 static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
921 {
922 	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
923 }
924 #endif
925 
926 /**
927  * ftrace_make_call - convert a nop call site into a call to addr
928  * @rec: the call site record (e.g. mcount/fentry)
929  * @addr: the address that the call site should call
930  *
931  * This is a very sensitive operation and great care needs
932  * to be taken by the arch.  The operation should carefully
933  * read the location, check to see if what is read is indeed
934  * what we expect it to be, and then on success of the compare,
935  * it should write to the location.
936  *
937  * The code segment at @rec->ip should be a nop
938  *
939  * Return must be:
940  *  0 on success
941  *  -EFAULT on error reading the location
942  *  -EINVAL on a failed compare of the contents
943  *  -EPERM  on error writing to the location
944  * Any other value will be considered a failure.
945  */
946 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
947 
948 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
949 	defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) || \
950 	defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)
951 /**
952  * ftrace_modify_call - convert from one addr to another (no nop)
953  * @rec: the call site record (e.g. mcount/fentry)
954  * @old_addr: the address expected to be currently called to
955  * @addr: the address to change to
956  *
957  * This is a very sensitive operation and great care needs
958  * to be taken by the arch.  The operation should carefully
959  * read the location, check to see if what is read is indeed
960  * what we expect it to be, and then on success of the compare,
961  * it should write to the location.
962  *
963  * When using call ops, this is called when the associated ops change, even
964  * when (addr == old_addr).
965  *
966  * The code segment at @rec->ip should be a caller to @old_addr
967  *
968  * Return must be:
969  *  0 on success
970  *  -EFAULT on error reading the location
971  *  -EINVAL on a failed compare of the contents
972  *  -EPERM  on error writing to the location
973  * Any other value will be considered a failure.
974  */
975 extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
976 			      unsigned long addr);
977 #else
978 /* Should never be called */
979 static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
980 				     unsigned long addr)
981 {
982 	return -EINVAL;
983 }
984 #endif
985 
986 extern int skip_trace(unsigned long ip);
987 extern void ftrace_module_init(struct module *mod);
988 extern void ftrace_module_enable(struct module *mod);
989 extern void ftrace_release_mod(struct module *mod);
990 #else /* CONFIG_DYNAMIC_FTRACE */
991 static inline int skip_trace(unsigned long ip) { return 0; }
992 static inline void ftrace_module_init(struct module *mod) { }
993 static inline void ftrace_module_enable(struct module *mod) { }
994 static inline void ftrace_release_mod(struct module *mod) { }
995 static inline int ftrace_text_reserved(const void *start, const void *end)
996 {
997 	return 0;
998 }
999 static inline unsigned long ftrace_location(unsigned long ip)
1000 {
1001 	return 0;
1002 }
1003 
1004 /*
1005  * Again, users of functions that have ftrace_ops may not
1006  * have them defined when ftrace is not enabled, but these
1007  * functions may still be called. Use a macro instead of inline.
1008  */
1009 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
1010 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
1011 #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
1012 #define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
1013 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
1014 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
1015 #define ftrace_free_filter(ops) do { } while (0)
1016 #define ftrace_ops_set_global_filter(ops) do { } while (0)
1017 
1018 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
1019 			    size_t cnt, loff_t *ppos) { return -ENODEV; }
1020 static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
1021 			     size_t cnt, loff_t *ppos) { return -ENODEV; }
1022 static inline int
1023 ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
1024 
1025 static inline bool is_ftrace_trampoline(unsigned long addr)
1026 {
1027 	return false;
1028 }
1029 #endif /* CONFIG_DYNAMIC_FTRACE */
1030 
1031 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1032 #ifndef ftrace_graph_func
1033 #define ftrace_graph_func ftrace_stub
1034 #define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
1035 #else
1036 #define FTRACE_OPS_GRAPH_STUB 0
1037 #endif
1038 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1039 
1040 /* totally disable ftrace - can not re-enable after this */
1041 void ftrace_kill(void);
1042 
1043 static inline void tracer_disable(void)
1044 {
1045 #ifdef CONFIG_FUNCTION_TRACER
1046 	ftrace_enabled = 0;
1047 #endif
1048 }
1049 
1050 /*
1051  * Ftrace disable/restore without lock. Some synchronization mechanism
1052  * must be used to prevent ftrace_enabled from being changed between
1053  * disable/restore.
1054  */
1055 static inline int __ftrace_enabled_save(void)
1056 {
1057 #ifdef CONFIG_FUNCTION_TRACER
1058 	int saved_ftrace_enabled = ftrace_enabled;
1059 	ftrace_enabled = 0;
1060 	return saved_ftrace_enabled;
1061 #else
1062 	return 0;
1063 #endif
1064 }
1065 
1066 static inline void __ftrace_enabled_restore(int enabled)
1067 {
1068 #ifdef CONFIG_FUNCTION_TRACER
1069 	ftrace_enabled = enabled;
1070 #endif
1071 }
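
/*
 * Illustrative sketch (editor's example, compiled out): callers pair the two
 * helpers and provide their own serialisation against other writers of
 * ftrace_enabled.
 */
#if 0
static void my_no_trace_region(void)
{
	int saved = __ftrace_enabled_save();

	/* ... work that must run with function tracing turned off ... */

	__ftrace_enabled_restore(saved);
}
#endif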
1072 
1073 /* All archs should have this, but we define it for consistency */
1074 #ifndef ftrace_return_address0
1075 # define ftrace_return_address0 __builtin_return_address(0)
1076 #endif
1077 
1078 /* Archs may use other ways for ADDR1 and beyond */
1079 #ifndef ftrace_return_address
1080 # ifdef CONFIG_FRAME_POINTER
1081 #  define ftrace_return_address(n) __builtin_return_address(n)
1082 # else
1083 #  define ftrace_return_address(n) 0UL
1084 # endif
1085 #endif
1086 
1087 #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
1088 #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
1089 #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
1090 #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
1091 #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
1092 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
1093 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
1094 
1095 static __always_inline unsigned long get_lock_parent_ip(void)
1096 {
1097 	unsigned long addr = CALLER_ADDR0;
1098 
1099 	if (!in_lock_functions(addr))
1100 		return addr;
1101 	addr = CALLER_ADDR1;
1102 	if (!in_lock_functions(addr))
1103 		return addr;
1104 	return CALLER_ADDR2;
1105 }
1106 
1107 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
1108   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
1109   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
1110 #else
1111 /*
1112  * Use defines instead of static inlines because some arches will generate code
1113  * for the CALLER_ADDR macros, when we really want these to be a real nop.
1114  */
1115 # define trace_preempt_on(a0, a1) do { } while (0)
1116 # define trace_preempt_off(a0, a1) do { } while (0)
1117 #endif
1118 
1119 #ifdef CONFIG_DYNAMIC_FTRACE
1120 extern void ftrace_init(void);
1121 #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
1122 #define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
1123 #else
1124 #define FTRACE_CALLSITE_SECTION	"__mcount_loc"
1125 #endif
1126 #else
1127 static inline void ftrace_init(void) { }
1128 #endif
1129 
1130 /*
1131  * Structure that defines an entry function trace.
1132  * It's already packed but the attribute "packed" is needed
1133  * to remove extra padding at the end.
1134  */
1135 struct ftrace_graph_ent {
1136 	unsigned long func; /* Current function */
1137 	int depth;
1138 } __packed;
1139 
1140 /*
1141  * Structure that defines an entry function trace with retaddr.
1142  * It's already packed but the attribute "packed" is needed
1143  * to remove extra padding at the end.
1144  */
1145 struct fgraph_retaddr_ent {
1146 	unsigned long func; /* Current function */
1147 	int depth;
1148 	unsigned long retaddr;  /* Return address */
1149 } __packed;
1150 
1151 /*
1152  * Structure that defines a return function trace.
1153  * It's already packed but the attribute "packed" is needed
1154  * to remove extra padding at the end.
1155  */
1156 struct ftrace_graph_ret {
1157 	unsigned long func; /* Current function */
1158 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL
1159 	unsigned long retval;
1160 #endif
1161 	int depth;
1162 	/* Number of functions that overran the depth limit for current task */
1163 	unsigned int overrun;
1164 } __packed;
1165 
1166 struct fgraph_ops;
1167 
1168 /* Type of the callback handlers for tracing the function graph */
1169 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
1170 				       struct fgraph_ops *,
1171 				       struct ftrace_regs *); /* return */
1172 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
1173 				      struct fgraph_ops *,
1174 				      struct ftrace_regs *); /* entry */
1175 
1176 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
1177 				   struct fgraph_ops *gops,
1178 				   struct ftrace_regs *fregs);
1179 bool ftrace_pids_enabled(struct ftrace_ops *ops);
1180 
1181 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1182 
1183 struct fgraph_ops {
1184 	trace_func_graph_ent_t		entryfunc;
1185 	trace_func_graph_ret_t		retfunc;
1186 	struct ftrace_ops		ops; /* for the hash lists */
1187 	void				*private;
1188 	trace_func_graph_ent_t		saved_func;
1189 	int				idx;
1190 };
1191 
1192 void *fgraph_reserve_data(int idx, int size_bytes);
1193 void *fgraph_retrieve_data(int idx, int *size_bytes);
1194 void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);
1195 
1196 /*
1197  * Stack of return addresses for functions
1198  * of a thread.
1199  * Used in struct thread_info
1200  */
1201 struct ftrace_ret_stack {
1202 	unsigned long ret;
1203 	unsigned long func;
1204 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
1205 	unsigned long fp;
1206 #endif
1207 	unsigned long *retp;
1208 };
1209 
1210 /*
1211  * Primary handler of a function return.
1212  * It relies on ftrace_return_to_handler.
1213  * Defined in entry_32/64.S
1214  */
1215 extern void return_to_handler(void);
1216 
1217 extern int
1218 function_graph_enter_regs(unsigned long ret, unsigned long func,
1219 			  unsigned long frame_pointer, unsigned long *retp,
1220 			  struct ftrace_regs *fregs);
1221 
1222 static inline int function_graph_enter(unsigned long ret, unsigned long func,
1223 				       unsigned long fp, unsigned long *retp)
1224 {
1225 	return function_graph_enter_regs(ret, func, fp, retp, NULL);
1226 }
1227 
1228 struct ftrace_ret_stack *
1229 ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
1230 unsigned long ftrace_graph_top_ret_addr(struct task_struct *task);
1231 
1232 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
1233 				    unsigned long ret, unsigned long *retp);
1234 unsigned long *fgraph_get_task_var(struct fgraph_ops *gops);
1235 
1236 /*
1237  * Sometimes we don't want to trace a function with the function
1238  * graph tracer but we still want it to be traced by the usual function
1239  * tracer if the function graph tracer is not configured.
1240  */
1241 #define __notrace_funcgraph		notrace
1242 
1243 #define FTRACE_RETFUNC_DEPTH 50
1244 #define FTRACE_RETSTACK_ALLOC_SIZE 32
1245 
1246 extern int register_ftrace_graph(struct fgraph_ops *ops);
1247 extern void unregister_ftrace_graph(struct fgraph_ops *ops);
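
/*
 * Illustrative sketch (editor's example, compiled out): a minimal function
 * graph client.  The callback signatures match the typedefs above; all my_*
 * names are hypothetical.
 */
#if 0
static int my_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
		    struct ftrace_regs *fregs)
{
	/* Return 0 to skip tracing the return of this particular call. */
	return 1;
}

static void my_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
		      struct ftrace_regs *fregs)
{
	/* trace->func, trace->depth (and retval, if configured) are valid here. */
}

static struct fgraph_ops my_gops = {
	.entryfunc	= my_entry,
	.retfunc	= my_return,
};

static int __init my_graph_init(void)
{
	return register_ftrace_graph(&my_gops);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph(&my_gops);
}
#endif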
1248 
1249 /**
1250  * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
1251  *
1252  * ftrace_graph_stop() is called when a severe error is detected in
1253  * the function graph tracing. This function is called by the critical
1254  * paths of function graph to keep those paths from doing any more harm.
1255  */
1256 DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
1257 
1258 static inline bool ftrace_graph_is_dead(void)
1259 {
1260 	return static_branch_unlikely(&kill_ftrace_graph);
1261 }
1262 
1263 extern void ftrace_graph_stop(void);
1264 
1265 /* The current handlers in use */
1266 extern trace_func_graph_ret_t ftrace_graph_return;
1267 extern trace_func_graph_ent_t ftrace_graph_entry;
1268 
1269 extern void ftrace_graph_init_task(struct task_struct *t);
1270 extern void ftrace_graph_exit_task(struct task_struct *t);
1271 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
1272 
1273 /* Used by assembly, but to quiet sparse warnings */
1274 extern struct ftrace_ops *function_trace_op;
1275 
1276 static inline void pause_graph_tracing(void)
1277 {
1278 	atomic_inc(&current->tracing_graph_pause);
1279 }
1280 
1281 static inline void unpause_graph_tracing(void)
1282 {
1283 	atomic_dec(&current->tracing_graph_pause);
1284 }
1285 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
1286 
1287 #define __notrace_funcgraph
1288 
1289 static inline void ftrace_graph_init_task(struct task_struct *t) { }
1290 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
1291 static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
1292 
1293 /* Define as macros as fgraph_ops may not be defined */
1294 #define register_ftrace_graph(ops) ({ -1; })
1295 #define unregister_ftrace_graph(ops) do { } while (0)
1296 
1297 static inline unsigned long
1298 ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
1299 		      unsigned long *retp)
1300 {
1301 	return ret;
1302 }
1303 
1304 static inline void pause_graph_tracing(void) { }
1305 static inline void unpause_graph_tracing(void) { }
1306 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1307 
1308 #ifdef CONFIG_TRACING
1309 enum ftrace_dump_mode;
1310 
1311 extern int ftrace_dump_on_oops_enabled(void);
1312 
1313 extern void disable_trace_on_warning(void);
1314 
1315 #else /* CONFIG_TRACING */
1316 static inline void disable_trace_on_warning(void) { }
1317 #endif /* CONFIG_TRACING */
1318 
1319 #ifdef CONFIG_FTRACE_SYSCALLS
1320 
1321 unsigned long arch_syscall_addr(int nr);
1322 
1323 #endif /* CONFIG_FTRACE_SYSCALLS */
1324 
1325 #endif /* _LINUX_FTRACE_H */
1326