// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct  Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *           Probes initial implementation (includes contributions from
 *           Rusty Russell).
 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *           interface to access function arguments.
 * 2004-Nov  Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *           for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/set_memory.h>
#include <linux/execmem.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

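/*
 * Probes are disallowed in the __kprobes text section as well as in the
 * early text from _stext up to __head_end, which holds exception entry
 * and other early code that cannot safely take a probe trap.
 */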
bool arch_within_kprobe_blacklist(unsigned long addr)
{
        return  (addr >= (unsigned long)__kprobes_text_start &&
                 addr < (unsigned long)__kprobes_text_end) ||
                (addr >= (unsigned long)_stext &&
                 addr < (unsigned long)__head_end);
}

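/*
 * Resolve a symbol name to a probeable address. On ELFv2 this means using
 * the local entry point for function-entry probes (or the ftrace location
 * when KPROBES_ON_FTRACE is enabled); on ELFv1 it means looking up the
 * dot-symbol so we land on the function text rather than the descriptor.
 */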
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
        kprobe_opcode_t *addr = NULL;

#ifdef CONFIG_PPC64_ELF_ABI_V2
        /* PPC64 ABIv2 needs local entry point */
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
        if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
                unsigned long faddr;
                /*
                 * Per livepatch.h, ftrace location is always within the first
                 * 16 bytes of a function on powerpc with -mprofile-kernel.
                 */
                faddr = ftrace_location_range((unsigned long)addr,
                                              (unsigned long)addr + 16);
                if (faddr)
                        addr = (kprobe_opcode_t *)faddr;
                else
#endif
                        addr = (kprobe_opcode_t *)ppc_function_entry(addr);
        }
#elif defined(CONFIG_PPC64_ELF_ABI_V1)
        /*
         * 64bit powerpc ABIv1 uses function descriptors:
         * - Check for the dot variant of the symbol first.
         * - If that fails, try looking up the symbol provided.
         *
         * This ensures we always get to the actual symbol and not
         * the descriptor.
         *
         * Also handle <module:symbol> format.
         */
        char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
        bool dot_appended = false;
        const char *c;
        ssize_t ret = 0;
        int len = 0;

        if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
                c++;
                len = c - name;
                memcpy(dot_name, name, len);
        } else
                c = name;

        if (*c != '\0' && *c != '.') {
                dot_name[len++] = '.';
                dot_appended = true;
        }
        ret = strscpy(dot_name + len, c, KSYM_NAME_LEN);
        if (ret > 0)
                addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);

        /* Fallback to the original non-dot symbol lookup */
        if (!addr && dot_appended)
                addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#else
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

        return addr;
}

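/*
 * Decide whether an offset still counts as "function entry". On ELFv2 the
 * local entry point can be up to 8 bytes past the global entry point
 * (16 bytes when the ftrace site is also acceptable); on other ABIs only
 * offset 0 qualifies.
 */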
static bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
#ifdef CONFIG_KPROBES_ON_FTRACE
        return offset <= 16;
#else
        return offset <= 8;
#endif
#else
        return !offset;
#endif
}

/* XXX try and fold the magic of kprobe_lookup_name() in this */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
                                         bool *on_func_entry)
{
        *on_func_entry = arch_kprobe_on_func_entry(offset);
        return (kprobe_opcode_t *)(addr + offset);
}

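/*
 * Validate the probe address (alignment, single-steppability, and not the
 * second word of a prefixed instruction) and copy the original instruction
 * into an out-of-line slot for later single-stepping.
 */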
int arch_prepare_kprobe(struct kprobe *p)
{
        int ret = 0;
        struct kprobe *prev;
        ppc_inst_t insn = ppc_inst_read(p->addr);

        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
                ret = -EINVAL;
        } else if (!can_single_step(ppc_inst_val(insn))) {
                printk("Cannot register a kprobe on instructions that can't be single stepped\n");
                ret = -EINVAL;
        } else if ((unsigned long)p->addr & ~PAGE_MASK &&
                   ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
                printk("Cannot register a kprobe on the second word of prefixed instruction\n");
                ret = -EINVAL;
        }
        prev = get_kprobe(p->addr - 1);

        /*
         * When prev is an ftrace-based kprobe, we don't have an insn, and it
         * doesn't probe a prefixed instruction.
         */
        if (prev && !kprobe_ftrace(prev) &&
            ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
                printk("Cannot register a kprobe on the second word of prefixed instruction\n");
                ret = -EINVAL;
        }

        /*
         * insn must be on a special executable page on ppc64. This is not
         * explicitly required on ppc32 (right now), but it doesn't hurt.
         */
        if (!ret) {
                p->ainsn.insn = get_insn_slot();
                if (!p->ainsn.insn)
                        ret = -ENOMEM;
        }

        if (!ret) {
                patch_instruction(p->ainsn.insn, insn);
                p->opcode = ppc_inst_val(insn);
        }

        p->ainsn.boostable = 0;
        return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

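/*
 * Arming patches a trap (BREAKPOINT_INSTRUCTION) over the probed
 * instruction; disarming restores the saved opcode.
 */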
void arch_arm_kprobe(struct kprobe *p)
{
        WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
        WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(p->opcode)));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

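/*
 * Set the single-step MSR bits and redirect nip to the out-of-line copy
 * of the probed instruction.
 */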
static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        enable_single_step(regs);

        /*
         * On powerpc we should single step on the original
         * instruction even if the probed insn is a trap
         * variant, as values in regs could play a part in
         * whether the trap is taken or not.
         */
        regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
}

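/*
 * Save/restore the active kprobe state so that a probe hit from within a
 * kprobe handler (reentrancy) can be handled and then unwound.
 */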
static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                               struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_msr = regs->msr;
}

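/*
 * Try to emulate the probed instruction with emulate_step() instead of
 * single-stepping it ("boosting"). Returns > 0 on successful emulation,
 * 0 if the instruction must be single-stepped, and < 0 (followed by a
 * BUG) for instructions that should never have been probed.
 */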
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
        int ret;
        ppc_inst_t insn = ppc_inst_read(p->ainsn.insn);

        /* regs->nip is also adjusted if emulate_step() returns 1 */
        ret = emulate_step(regs, insn);
        if (ret > 0) {
                /*
                 * Once this instruction has been boosted
                 * successfully, set the boostable flag.
                 */
                if (unlikely(p->ainsn.boostable == 0))
                        p->ainsn.boostable = 1;
        } else if (ret < 0) {
                /*
                 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
                 * So, we should never get here... but it's still
                 * good to catch them, just in case...
                 */
                printk("Can't step on instruction %08lx\n", ppc_inst_as_ulong(insn));
                BUG();
        } else {
                /*
                 * If we haven't previously emulated this instruction, then it
                 * can't be boosted. Note it down so we don't try to do so again.
                 *
                 * If, however, we had emulated this instruction in the past,
                 * then this is just an error with the current run (for
                 * instance, exceptions due to a load/store). We return 0 so
                 * that this is now single-stepped, but continue to try
                 * emulating it in subsequent probe hits.
                 */
                if (unlikely(p->ainsn.boostable != 1))
                        p->ainsn.boostable = -1;
        }

        return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);

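/*
 * Main entry point from the trap exception: decide whether the trap at
 * regs->nip belongs to a kprobe, run the pre-handler, and either emulate
 * the original instruction or set up a single step over the out-of-line
 * copy. Returns 1 if the trap was consumed by kprobes.
 */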
int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        unsigned int *addr = (unsigned int *)regs->nip;
        struct kprobe_ctlblk *kcb;

        if (user_mode(regs))
                return 0;

        if (!IS_ENABLED(CONFIG_BOOKE) &&
            (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
                return 0;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        p = get_kprobe(addr);
        if (!p) {
                unsigned int instr;

                if (get_kernel_nofault(instr, addr))
                        goto no_kprobe;

                if (instr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * PowerPC has multiple variants of the "trap"
                         * instruction. If the current instruction is a
                         * trap variant, it could belong to someone else.
                         */
                        if (is_trap(instr))
                                goto no_kprobe;
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it. Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address. In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                kprobe_opcode_t insn = *p->ainsn.insn;
                if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
                        /* Turn off 'trace' bits */
                        regs_set_return_msr(regs,
                                            (regs->msr & ~MSR_SINGLESTEP) |
                                            kcb->kprobe_saved_msr);
                        goto no_kprobe;
                }

                /*
                 * We have reentered the kprobe_handler(), since another probe
                 * was hit while within the handler. Here we save the original
                 * kprobes variables and just single step on the instruction of
                 * the new probe without calling any user handlers.
                 */
                save_previous_kprobe(kcb);
                set_current_kprobe(p, regs, kcb);
                kprobes_inc_nmissed_count(p);
                kcb->kprobe_status = KPROBE_REENTER;
                if (p->ainsn.boostable >= 0) {
                        ret = try_to_emulate(p, regs);

                        if (ret > 0) {
                                restore_previous_kprobe(kcb);
                                preempt_enable();
                                return 1;
                        }
                }
                prepare_singlestep(p, regs);
                return 1;
        }

        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler changed execution path, so skip single-step setup */
                reset_current_kprobe();
                preempt_enable();
                return 1;
        }

        if (p->ainsn.boostable >= 0) {
                ret = try_to_emulate(p, regs);

                if (ret > 0) {
                        if (p->post_handler)
                                p->post_handler(p, regs, 0);

                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                        reset_current_kprobe();
                        preempt_enable();
                        return 1;
                }
        }
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable();
        return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
        int len;
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur || user_mode(regs))
                return 0;

        len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
        /* make sure we got here for an instruction we have a kprobe on */
        if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        /* Adjust nip to after the single-stepped instruction */
        regs_set_return_ip(regs, (unsigned long)cur->addr + len);
        regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable();

        /*
         * If somebody else is single-stepping across a probe point, msr
         * will have DE/SE set, in which case, continue the remaining processing
         * of do_debug, as if this is not a probe hit.
         */
        if (regs->msr & MSR_SINGLESTEP)
                return 0;

        return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

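/*
 * Called when a fault is taken with a kprobe active: abort an in-progress
 * single step and point nip back at the probed address, or let the
 * exception tables fix up faults taken inside a probe handler.
 */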
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe, point nip back to the probe address, and
                 * allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs_set_return_ip(regs, (unsigned long)cur->addr);
                /* Turn off 'trace' bits */
                regs_set_return_msr(regs,
                                    (regs->msr & ~MSR_SINGLESTEP) |
                                    kcb->kprobe_saved_msr);
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if ((entry = search_exception_tables(regs->nip)) != NULL) {
                        regs_set_return_ip(regs, extable_fixup(entry));
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

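/*
 * Report whether this probe sits on the rethook (kretprobe return)
 * trampoline, which the core kprobes code treats specially.
 */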
int arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline)
                return 1;

        return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);