// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/irq.h>
#include <asm/patching.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

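/*
 * For context, a minimal sketch of how the generic kprobes API exercises the
 * arch hooks below (hypothetical example module, not part of this file):
 * register_kprobe() calls arch_prepare_kprobe() to validate and decode the
 * probed instruction, then arch_arm_kprobe() to patch the BRK in.
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_sys_open",	// hypothetical probe target
 *	};
 *
 *	static int __kprobes handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre-handler: pc = 0x%lx\n", instruction_pointer(regs));
 *		return 0;	// 0: fall through to single-step/simulation
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		kp.pre_handler = handler_pre;
 *		return register_kprobe(&kp);	// unregister_kprobe() on exit
 *	}
 */
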
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->ainsn.api.insn;

	/*
	 * Prepare the insn slot. As Mark Rutland points out, this depends on
	 * a couple of subtleties:
	 *
	 * - That the I-cache maintenance for these instructions is complete
	 *   *before* the kprobe BRK is written (and
	 *   aarch64_insn_patch_text_nosync() ensures this, but just omits
	 *   causing a Context-Synchronization-Event on all CPUs).
	 *
	 * - That the kprobe BRK results in an exception (and consequently a
	 *   Context-Synchronization-Event), which ensures that the CPU will
	 *   fetch the single-step slot instructions *after* this, ensuring
	 *   that the new instructions are used.
	 *
	 * Normally an ISB would be placed after patching to guarantee that
	 * the I-cache maintenance is observed on all CPUs; however, the
	 * single-step slot is only ever executed after taking the kprobe BRK
	 * exception, so generating another Context-Synchronization-Event via
	 * ISB here is unnecessary.
	 */
	aarch64_insn_patch_text_nosync(addr, p->opcode);
	aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);

	/*
	 * The return address needs restoring after stepping out of line
	 * (xol).
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}
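
/*
 * The resulting single-step slot is two instructions long:
 *
 *	insn[0]: the probed instruction, copied verbatim
 *	insn[1]: BRK64_OPCODE_KPROBES_SS
 *
 * When the probe fires, setup_singlestep() points the PC at insn[0]; after
 * the original instruction executes, the BRK at insn[1] traps into
 * kprobe_breakpoint_ss_handler(), which runs the post-handler and restores
 * the PC to the instruction following the probe point.
 */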

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out of line; no need to adjust the PC */
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(p, kcb, regs);
}

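/*
 * Validate and decode the probed instruction. Unaligned probe addresses and
 * addresses covered by an exception table entry are rejected; instructions
 * that cannot safely be executed out of line (typically PC-relative ones,
 * see decode-insn.c) are either simulated or refused by the decoder.
 */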
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = BRK64_OPCODE_KPROBES;

	aarch64_insn_patch_text(&addr, &insn, 1);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;

	aarch64_insn_patch_text(&addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}

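/*
 * kprobe_ctlblk holds a single prev_kprobe slot, so only one level of kprobe
 * re-entry (a probe hit from within another probe's handler) can be saved
 * and restored here; deeper recursion is caught and BUG()s in
 * reenter_kprobe().
 */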
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Mask all of DAIF while executing the instruction out-of-line, to keep things
 * simple and avoid nesting exceptions. Interrupts do have to be disabled,
 * since the kprobe state is per-CPU and does not migrate with the task.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	regs->pstate |= DAIF_MASK;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	regs->pstate &= ~DAIF_MASK;
	regs->pstate |= kcb->saved_irqflag;
}

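/*
 * Either step the instruction out of line (by masking DAIF and pointing the
 * PC at the single-step slot) or, for instructions without a slot, simulate
 * it immediately and run the post-handler. When re-entering (a probe hit
 * from within another probe's handler), the previous kprobe state is saved
 * first.
 */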
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		kprobes_save_local_irqflag(kcb, regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

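/*
 * Common post-processing once the probed instruction has been stepped or
 * simulated: fix up the PC, run the user's post_handler, and clear (or, for
 * a re-entered probe, restore) the per-CPU kprobe state.
 */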
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address if this was a non-branching insn */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore the original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)
		cur->post_handler(cur, regs, 0);

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused a page fault. We reset the current kprobe, point
		 * the PC back at the probe address, and allow the page
		 * fault handler to continue as a normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	}
	return 0;
}

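/*
 * Entry point for the BRK planted at the probe address (KPROBES_BRK_IMM).
 * Look up the kprobe for the trapping PC, handle re-entry, run the
 * pre-handler, and then single-step or simulate the original instruction.
 */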
static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);
	if (WARN_ON_ONCE(!p)) {
		/*
		 * Something went wrong. This BRK used an immediate reserved
		 * for kprobes, but we couldn't find any corresponding probe.
		 */
		return DBG_HOOK_ERROR;
	}

	if (cur_kprobe) {
		/* Hit a kprobe inside another kprobe */
		if (!reenter_kprobe(p, regs, kcb))
			return DBG_HOOK_ERROR;
	} else {
		/* Probe hit */
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/*
		 * If we have no pre-handler or it returned 0, we continue
		 * with normal processing. If we have a pre-handler and it
		 * returned non-zero, it has modified the execution path and
		 * does not need the single-step; just reset the current
		 * kprobe and exit.
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs))
			setup_singlestep(p, regs, kcb, 0);
		else
			reset_current_kprobe();
	}

	return DBG_HOOK_HANDLED;
}

static struct break_hook kprobes_break_hook = {
	.imm = KPROBES_BRK_IMM,
	.fn = kprobe_breakpoint_handler,
};

static int __kprobes
kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);

		return DBG_HOOK_HANDLED;
	}

	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

static struct break_hook kprobes_break_ss_hook = {
	.imm = KPROBES_BRK_SS_IMM,
	.fn = kprobe_breakpoint_ss_handler,
};

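/*
 * __kretprobe_trampoline (see kprobes_trampoline.S) consists of a BRK with
 * KRETPROBES_BRK_IMM; when a probed function returns into the trampoline,
 * this hook recovers the real return address via
 * kretprobe_trampoline_handler() and resumes execution there.
 */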
static int __kprobes
kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
{
	if (regs->pc != (unsigned long)__kretprobe_trampoline)
		return DBG_HOOK_ERROR;

	regs->pc = kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kretprobes_break_hook = {
	.imm = KRETPROBES_BRK_IMM,
	.fn = kretprobe_breakpoint_handler,
};

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}

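/*
 * A kretprobe diverts the return path of the probed function: x30 is
 * replaced with the trampoline address below, so the function "returns"
 * into __kretprobe_trampoline instead of its caller. A minimal usage sketch
 * (hypothetical example, not part of this file):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("return value: %ld\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe rp = {
 *		.handler	= ret_handler,
 *		.kp.symbol_name	= "kernel_clone",	// hypothetical target
 *	};
 *
 *	// register_kretprobe(&rp) in module init, unregister on exit.
 */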
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
	ri->fp = (void *)regs->regs[29];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	register_kernel_break_hook(&kprobes_break_hook);
	register_kernel_break_hook(&kprobes_break_ss_hook);
	register_kernel_break_hook(&kretprobes_break_hook);

	return 0;
}