/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#include <asm/ptrace.h>

#ifdef CONFIG_FUNCTION_TRACER
#ifndef CC_USING_FENTRY
# error Compiler does not support fentry?
#endif
#define MCOUNT_ADDR		((unsigned long)(__fentry__))
#define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
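
/*
 * Illustrative breakdown (not from the original source): the fentry
 * call emitted at each function entry is a 5-byte near call,
 * "e8 <rel32>" -- a one-byte opcode plus a 32-bit displacement
 * relative to the next instruction, targeting __fentry__.
 */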

/* Ignore unused weak functions which will have non-zero offsets */
#ifdef CONFIG_HAVE_FENTRY
# include <asm/ibt.h>
/* Add an offset for endbr64 if IBT is enabled */
# define FTRACE_MCOUNT_MAX_OFFSET	ENDBR_INSN_SIZE
#endif
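
/*
 * Sketch of a traced function's entry with IBT enabled (hypothetical
 * layout, for orientation only):
 *
 *	<func>:
 *		endbr64			# ENDBR_INSN_SIZE bytes
 *		call __fentry__		# mcount site, ENDBR_INSN_SIZE past <func>
 *		...
 *
 * So a call site up to FTRACE_MCOUNT_MAX_OFFSET bytes past the symbol
 * is legitimate; a larger offset indicates an unused weak function
 * whose mcount site must be ignored.
 */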

#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifndef __ASSEMBLY__
extern void __fentry__(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}

static inline unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
{
#ifdef CONFIG_X86_KERNEL_IBT
	u32 instr;

	/*
	 * Be extra careful in case the entry ip sits on a page edge, but
	 * otherwise avoid the overhead of get_kernel_nofault().
	 */
	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
			return fentry_ip;
	} else {
		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
	}
	if (is_endbr(instr))
		fentry_ip -= ENDBR_INSN_SIZE;
#endif
	return fentry_ip;
}
#define ftrace_get_symaddr(fentry_ip)	arch_ftrace_get_symaddr(fentry_ip)
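
/*
 * Worked example with hypothetical addresses: a function at
 * 0xffffffff81000000 built with IBT starts "endbr64; call __fentry__",
 * so handlers see fentry_ip == 0xffffffff81000004. The load above finds
 * the endbr64 in the preceding 4 bytes and the helper returns
 * 0xffffffff81000000, the symbol address; without a preceding endbr64
 * (or without CONFIG_X86_KERNEL_IBT) fentry_ip is returned unchanged.
 */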

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

#include <linux/ftrace_regs.h>

static __always_inline struct pt_regs *
arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
	/* regs.cs is non-zero only when FL_SAVE_REGS was set */
	if (!arch_ftrace_regs(fregs)->regs.cs)
		return NULL;
	return &arch_ftrace_regs(fregs)->regs;
}
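
/*
 * Sketch (not part of this header) of a callback reaching the full
 * pt_regs; "my_callback" is a hypothetical name, and its ftrace_ops
 * must be registered with FTRACE_OPS_FL_SAVE_REGS for a non-NULL
 * result from the generic ftrace_get_regs() wrapper:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (!regs)	// ops registered without FL_SAVE_REGS
 *			return;
 *		// full register state is available here
 *	}
 */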

#define arch_ftrace_fill_perf_regs(fregs, _regs) do {			\
		(_regs)->ip = arch_ftrace_regs(fregs)->regs.ip;		\
		(_regs)->sp = arch_ftrace_regs(fregs)->regs.sp;		\
		/* cs/flags are not saved by the args trampoline */	\
		(_regs)->cs = __KERNEL_CS;				\
		(_regs)->flags = 0;					\
	} while (0)

#define ftrace_regs_set_instruction_pointer(fregs, _ip)	\
	do { arch_ftrace_regs(fregs)->regs.ip = (_ip); } while (0)

static __always_inline unsigned long
ftrace_regs_get_return_address(struct ftrace_regs *fregs)
{
	/* The traced function's return address sits at the saved sp */
	return *(unsigned long *)ftrace_regs_get_stack_pointer(fregs);
}

struct ftrace_ops;
#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#else
#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * When an ftrace registered caller is tracing a function that is also
 * the target of a register_ftrace_direct() call, the ftrace_caller
 * trampoline needs to tell the two apart. To do this, we place the
 * direct caller's address in the ORIG_AX slot of pt_regs; a non-zero
 * value tells ftrace_caller that a direct caller is attached.
 */
static inline void
__arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
{
	/* Emulate a call */
	regs->orig_ax = addr;
}
#define arch_ftrace_set_direct_caller(fregs, addr) \
	__arch_ftrace_set_direct_caller(&arch_ftrace_regs(fregs)->regs, addr)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
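
/*
 * Assumed trampoline behaviour, for orientation: after the regular
 * callbacks run, ftrace_regs_caller inspects the ORIG_AX slot and, if
 * it is non-zero, jumps to that address instead of returning straight
 * to the traced function -- as if the traced function had called the
 * direct trampoline itself.
 */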

#ifdef CONFIG_DYNAMIC_FTRACE

struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef __ASSEMBLY__

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer);

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
extern void set_ftrace_ops_ro(void);
#else
static inline void set_ftrace_ops_ro(void) { }
#endif

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Compare the symbol name with the system call name. Skip the
	 * "__x64_sys", "__ia32_sys", "__do_sys" or simple "sys" prefix.
	 */
	return !strcmp(sym + 3, name + 3) ||
		(!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
		(!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3)) ||
		(!strncmp(sym, "__do_sys", 8) && !strcmp(sym + 8, name + 3));
}
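
/*
 * Worked example: for sym = "__x64_sys_openat" and name = "sys_openat"
 * the second clause matches: "__x64_" is a prefix of sym, and
 * sym + 9 and name + 3 are both "_openat". The first clause covers the
 * bare "sys_openat" symbol by comparing past the shared "sys" prefix.
 */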

#ifndef COMPILE_OFFSETS

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <linux/compat.h>

/*
 * Because ia32 syscalls do not map to x86_64 syscall numbers, this
 * screws up the trace output when tracing an ia32 task. Instead of
 * reporting bogus syscalls, just do not trace them.
 *
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return in_32bit_syscall();
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !COMPILE_OFFSETS */
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_FTRACE_H */