Lines matching "post - processing"
1 // SPDX-License-Identifier: GPL-2.0-only
28 #include <asm/debug-monitors.h>
31 #include <asm/text-patching.h>
37 #include "decode-insn.h"
58 kprobe_opcode_t *addr = p->ainsn.xol_insn; in arch_prepare_ss_slot()
64 * - That the I-cache maintenance for these instructions is complete in arch_prepare_ss_slot()
66 * ensures this, but just omits causing a Context-Synchronization-Event in arch_prepare_ss_slot()
69 * - That the kprobe BRK results in an exception (and consequently a in arch_prepare_ss_slot()
70 * Context-Synchronization-Event), which ensures that the CPU will in arch_prepare_ss_slot()
71 * fetch the single-step slot instructions *after* this, ensuring that in arch_prepare_ss_slot()
74 * One would normally place an ISB after patching to guarantee that I-cache maintenance in arch_prepare_ss_slot()
75 * is observed on all CPUs; however, the single-step slot is installed in in arch_prepare_ss_slot()
77 * Context-Synchronization-Event via an ISB again. in arch_prepare_ss_slot()
79 aarch64_insn_patch_text_nosync(addr, le32_to_cpu(p->opcode)); in arch_prepare_ss_slot()
85 p->ainsn.xol_restore = (unsigned long) p->addr + in arch_prepare_ss_slot()
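For orientation, here is a minimal sketch of the complete slot preparation these fragments come from. The lines the listing omits are reconstructed as an assumption, the function name is illustrative only, and the same headers as the file above are assumed:

static void sketch_prepare_ss_slot(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->ainsn.xol_insn;

	/* slot[0]: the probed instruction, executed out of line */
	aarch64_insn_patch_text_nosync(addr, le32_to_cpu(p->opcode));
	/* slot[1]: a dedicated BRK that traps straight back into the kernel */
	aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);
	/* resume execution at the instruction following the probe point */
	p->ainsn.xol_restore = (unsigned long)p->addr + sizeof(kprobe_opcode_t);
}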
92 p->ainsn.xol_restore = 0; in arch_prepare_simulate()
99 if (p->ainsn.api.handler) in arch_simulate_insn()
100 p->ainsn.api.handler(le32_to_cpu(p->opcode), (long)p->addr, regs); in arch_simulate_insn()
102 /* single step simulated, now go for post processing */ in arch_simulate_insn()
108 unsigned long probe_addr = (unsigned long)p->addr; in arch_prepare_kprobe()
111 return -EINVAL; in arch_prepare_kprobe()
114 p->opcode = *p->addr; in arch_prepare_kprobe()
117 return -EINVAL; in arch_prepare_kprobe()
120 switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) { in arch_prepare_kprobe()
122 return -EINVAL; in arch_prepare_kprobe()
125 p->ainsn.xol_insn = NULL; in arch_prepare_kprobe()
129 p->ainsn.xol_insn = get_insn_slot(); in arch_prepare_kprobe()
130 if (!p->ainsn.xol_insn) in arch_prepare_kprobe()
131 return -ENOMEM; in arch_prepare_kprobe()
136 if (p->ainsn.xol_insn) in arch_prepare_kprobe()
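The switch above is easier to follow with its case labels, which the listing omits. The following is a hedged reconstruction, assuming the probe_insn values declared in decode-insn.h:

	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* instruction cannot be probed at all */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* branch-like insn: simulate it, no XOL slot needed */
		p->ainsn.xol_insn = NULL;
		break;

	case INSN_GOOD:		/* ordinary insn: step it out of line from an XOL slot */
		p->ainsn.xol_insn = get_insn_slot();
		if (!p->ainsn.xol_insn)
			return -ENOMEM;
		break;
	}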
147 void *addr = p->addr; in arch_arm_kprobe()
156 void *addr = p->addr; in arch_disarm_kprobe()
157 u32 insn = le32_to_cpu(p->opcode); in arch_disarm_kprobe()
164 if (p->ainsn.xol_insn) { in arch_remove_kprobe()
165 free_insn_slot(p->ainsn.xol_insn, 0); in arch_remove_kprobe()
166 p->ainsn.xol_insn = NULL; in arch_remove_kprobe()
172 kcb->prev_kprobe.kp = kprobe_running(); in save_previous_kprobe()
173 kcb->prev_kprobe.status = kcb->kprobe_status; in save_previous_kprobe()
178 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); in restore_previous_kprobe()
179 kcb->kprobe_status = kcb->prev_kprobe.status; in restore_previous_kprobe()
188 * Mask all of DAIF while executing the instruction out-of-line, to keep things
189 * simple and avoid nesting exceptions. Interrupts do have to be disabled since
190 * the kprobe state is per-CPU and doesn't get migrated.
195 kcb->saved_irqflag = regs->pstate & DAIF_MASK; in kprobes_save_local_irqflag()
196 regs->pstate |= DAIF_MASK; in kprobes_save_local_irqflag()
202 regs->pstate &= ~DAIF_MASK; in kprobes_restore_local_irqflag()
203 regs->pstate |= kcb->saved_irqflag; in kprobes_restore_local_irqflag()
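A tiny, purely hypothetical user-space check of the round-trip the two helpers above perform on the DAIF bits of pstate; the DAIF_MASK value is assumed, and the kernel helpers appear only in comments:

#include <assert.h>
#include <stdint.h>

#define DAIF_MASK	0x3c0UL		/* assumed: D, A, I, F bits of PSTATE */

int main(void)
{
	uint64_t pstate = 0x80000085UL;		/* arbitrary example PSTATE, I bit set */
	uint64_t saved  = pstate & DAIF_MASK;	/* kprobes_save_local_irqflag() */

	pstate |= DAIF_MASK;			/* mask D, A, I and F */
	/* ... the single-step slot runs with all of DAIF masked ... */
	pstate &= ~DAIF_MASK;			/* kprobes_restore_local_irqflag() */
	pstate |= saved;

	assert((pstate & DAIF_MASK) == saved);	/* original flags are back */
	assert(pstate == 0x80000085UL);		/* nothing else was disturbed */
	return 0;
}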
215 kcb->kprobe_status = KPROBE_REENTER; in setup_singlestep()
217 kcb->kprobe_status = KPROBE_HIT_SS; in setup_singlestep()
221 if (p->ainsn.xol_insn) { in setup_singlestep()
223 slot = (unsigned long)p->ainsn.xol_insn; in setup_singlestep()
237 switch (kcb->kprobe_status) { in reenter_kprobe()
260 /* restore the resume address if the stepped insn was non-branching */ in post_kprobe_handler()
261 if (cur->ainsn.xol_restore != 0) in post_kprobe_handler()
262 instruction_pointer_set(regs, cur->ainsn.xol_restore); in post_kprobe_handler()
265 if (kcb->kprobe_status == KPROBE_REENTER) { in post_kprobe_handler()
269 /* call post handler */ in post_kprobe_handler()
270 kcb->kprobe_status = KPROBE_HIT_SSDONE; in post_kprobe_handler()
271 if (cur->post_handler) in post_kprobe_handler()
272 cur->post_handler(cur, regs, 0); in post_kprobe_handler()
282 switch (kcb->kprobe_status) { in kprobe_fault_handler()
292 instruction_pointer_set(regs, (unsigned long) cur->addr); in kprobe_fault_handler()
295 if (kcb->kprobe_status == KPROBE_REENTER) { in kprobe_fault_handler()
333 kcb->kprobe_status = KPROBE_HIT_ACTIVE; in kprobe_brk_handler()
336 * If we have no pre-handler or it returned 0, we in kprobe_brk_handler()
337 * continue with normal processing. If we have a in kprobe_brk_handler()
338 * pre-handler and it returned non-zero, it will in kprobe_brk_handler()
339 * modify the execution path, so there is no need to single-step in kprobe_brk_handler()
342 if (!p->pre_handler || !p->pre_handler(p, regs)) in kprobe_brk_handler()
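Since the comment above spells out the pre-handler contract, a minimal self-contained module shows it from the user side. The probed symbol is an assumed example; returning 0 from the pre-handler lets the kprobe core go on to single-step or simulate the instruction, after which the post handler runs:

#include <linux/module.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
	.symbol_name = "vfs_read",	/* assumed example symbol */
};

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe pre: pc = %pS\n", (void *)instruction_pointer(regs));
	return 0;	/* 0: continue with normal processing (step/simulate) */
}

static void demo_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
	pr_info("kprobe post: stepped past the probed instruction\n");
}

static int __init demo_init(void)
{
	kp.pre_handler = demo_pre;
	kp.post_handler = demo_post;
	return register_kprobe(&kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");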
358 if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) && in kprobe_ss_brk_handler()
359 ((unsigned long)&cur->ainsn.xol_insn[1] == addr)) { in kprobe_ss_brk_handler()
373 if (regs->pc != (unsigned long)__kretprobe_trampoline) in kretprobe_brk_handler()
376 regs->pc = kretprobe_trampoline_handler(regs, (void *)regs->regs[29]); in kretprobe_brk_handler()
408 ri->ret_addr = (kprobe_opcode_t *)regs->regs[30]; in arch_prepare_kretprobe()
409 ri->fp = (void *)regs->regs[29]; in arch_prepare_kretprobe()
412 regs->regs[30] = (long)&__kretprobe_trampoline; in arch_prepare_kretprobe()
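The LR hijack above is what makes return probes work: when the probed function returns, it lands in __kretprobe_trampoline and the registered handler runs with the real return value available. A minimal sketch, assuming "kernel_clone" as an example symbol:

#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("kretprobe: returned %ld\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe rp = {
	.kp.symbol_name	= "kernel_clone",	/* assumed example symbol */
	.handler	= demo_ret,
	.maxactive	= 16,	/* concurrent entries to track */
};

static int __init demo_init(void)
{
	return register_kretprobe(&rp);
}

static void __exit demo_exit(void)
{
	unregister_kretprobe(&rp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");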