// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kmsan-checks.h>
#include <linux/cpufeature.h>
#include <linux/kprobes.h>
#include <linux/execmem.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * The function prologue is generated either with gcc's hotpatch feature
 * (since gcc 4.8) or with a combination of the -pg -mrecord-mcount
 * -mnop-mcount -mfentry flags (since gcc 9 / clang 10).
 * In both cases the original, still disabled, function prologue contains
 * only a single six byte instruction and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the instruction gets patched and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction is patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call
 * ABI where r0 contains the return address. It is also expected that
 * the called function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump to the ftrace caller directly, but need
 * a trampoline (ftrace_plt), which also clobbers r1.
 */

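/*
 * For illustration, the two six byte encodings toggled by this file
 * (RIL format: opcode byte, register/mask nibble plus opcode extension
 * nibble, 32-bit signed halfword displacement; the byte values follow
 * from the 0xc004/0xc005 opcodes used below):
 *
 *	c0 04 00 00 00 00	brcl	0,0		# branch never: nop
 *	c0 05 dd dd dd dd	brasl	%r0,target	# dd = (target - ip) / 2
 */
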
void *ftrace_func __read_mostly = ftrace_stub;

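/*
 * The six byte instruction at each patch site, as seen by the patching
 * code: a 16-bit opcode (including the mask/register nibble) followed
 * by a 32-bit signed displacement counted in halfwords - hence the
 * divisions by two throughout this file.
 */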
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

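/*
 * Pick the shared trampoline variant: with expolines active the variant
 * ending in an exrl based expoline thunk is used instead of a plain br,
 * so that the indirect branch to the interceptor follows the spectre v2
 * mitigation (CONFIG_EXPOLINE).
 */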
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_exrl;
		tend = ftrace_shared_hotpatch_trampoline_exrl_end;
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

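/*
 * Tell the ftrace core whether each patch site needs to be initialized
 * with a branch to a per-site trampoline: without the sequential
 * instruction fetching facility (cpu_has_seq_insn()) the full six byte
 * instruction cannot safely be rewritten at runtime, so only sub-fields
 * of it (displacement, mask byte) are ever modified. With the facility
 * the branch is patched directly and no trampolines are needed.
 */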
bool ftrace_need_init_nop(void)
{
	return !cpu_has_seq_insn();
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const struct ftrace_insn orig = { .opc = 0xc004, .disp = 0 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	struct ftrace_insn old;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old)))
		return -EFAULT;
	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp(&orig, &old, sizeof(old))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015;	/* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

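/*
 * Rough sketch (not literal disassembly) of the layout that
 * ftrace_init_nop() sets up on machines without sequential instruction
 * fetching:
 *
 *	func:		brcl	0,trampoline	# mask patched to 15 to enable
 *	trampoline:	brasl	%r1,<shared trampoline>
 *			.quad	interceptor	# initially FTRACE_ADDR
 *			.quad	rest_of_intercepted_function
 *
 * The shared trampoline locates this data area via %r1 and branches to
 * the interceptor stored in it.
 */
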
static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	disp = (s64)insn.disp * 2;
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)
		return ERR_PTR(-EINVAL);
	return trampoline;
}

static inline struct ftrace_insn
ftrace_generate_branch_insn(unsigned long ip, unsigned long target)
{
	/* brasl %r0,target or brcl 0,0 (nop) */
	return (struct ftrace_insn){ .opc = target ? 0xc005 : 0xc004,
				     .disp = target ? (target - ip) / 2 : 0 };
}

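/*
 * Example of what ftrace_generate_branch_insn() produces, using
 * hypothetical addresses: ip = 0x1000 and target = 0x2000 yield
 * { .opc = 0xc005, .disp = 0x800 }, i.e. brasl %r0,0x2000, while
 * target == 0 yields the brcl 0,0 nop.
 */
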
static int ftrace_patch_branch_insn(unsigned long ip, unsigned long old_target,
				    unsigned long target)
{
	struct ftrace_insn orig = ftrace_generate_branch_insn(ip, old_target);
	struct ftrace_insn new = ftrace_generate_branch_insn(ip, target);
	struct ftrace_insn old;

	if (!IS_ALIGNED(ip, 8))
		return -EINVAL;
	if (copy_from_kernel_nofault(&old, (void *)ip, sizeof(old)))
		return -EFAULT;
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *)ip, &new, sizeof(new));
	return 0;
}

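/*
 * A note on the IS_ALIGNED(ip, 8) check in ftrace_patch_branch_insn()
 * above: with 8 byte alignment the six byte instruction never crosses a
 * doubleword boundary, so the rewrite done by s390_kernel_write()
 * presumably cannot be observed half-done by concurrent instruction
 * fetch.
 */
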
static int ftrace_modify_trampoline_call(struct dyn_ftrace *rec,
					 unsigned long old_addr,
					 unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	if (cpu_has_seq_insn())
		return ftrace_patch_branch_insn(rec->ip, old_addr, addr);
	else
		return ftrace_modify_trampoline_call(rec, old_addr, addr);
}

static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}

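/*
 * Illustration of what ftrace_patch_branch_mask() does: only the second
 * byte of the brcl instruction is written, which flips its condition
 * mask in place:
 *
 *	c0 04 dd dd dd dd	brcl	0,target	# branch never: nop
 *	c0 f4 dd dd dd dd	brcl	15,target	# branch always
 *
 * A single byte store is atomic, which is presumably why only the mask
 * byte, and never the full instruction, is rewritten at runtime here.
 */
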
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... for the !cpu_has_seq_insn() case */
	if (cpu_has_seq_insn())
		return ftrace_patch_branch_insn(rec->ip, addr, 0);
	else
		return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

static int ftrace_make_trampoline_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (cpu_has_seq_insn())
		return ftrace_patch_branch_insn(rec->ip, 0, addr);
	else
		return ftrace_make_trampoline_call(rec, addr);
}

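/*
 * Note: rather than hotpatching the call instruction inside
 * ftrace_caller, the s390 entry code calls through the ftrace_func
 * pointer set below, so switching tracers is a single pointer update
 * (see ftrace_caller in mcount.S).
 */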
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

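/*
 * In the s390 ELF ABI %r14 holds the return address and %r15 the stack
 * pointer, hence gprs[14]/gprs[15] below: the saved %r14 is redirected
 * to return_to_handler so the traced function "returns" into the graph
 * tracer exit code first.
 */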
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14];
	unsigned long sp = arch_ftrace_regs(fregs)->regs.gprs[15];

	if (unlikely(ftrace_graph_is_dead()))
		return;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;
	if (!function_graph_enter_regs(*parent, ip, 0, (unsigned long *)sp, fregs))
		*parent = (unsigned long)&return_to_handler;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	if (unlikely(kprobe_ftrace_disabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	kmsan_unpoison_memory(fregs, ftrace_regs_size());
	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* Let the handlers see the probe address as the instruction pointer. */
	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		/* Emulate the single-step over the probed instruction. */
		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif