// SPDX-License-Identifier: GPL-2.0
/*
 * arch/parisc/kernel/kprobes.c
 *
 * PA-RISC kprobes implementation
 *
 * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
 */

#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* instructions are always 4 bytes and must be word-aligned */
	if ((unsigned long)p->addr & 3UL)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr,
	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	flush_insn_slot(p);
	return 0;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;

	free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, PARISC_KPROBES_BREAK_INSN);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

static void __kprobes setup_singlestep(struct kprobe *p,
	struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	kcb->iaoq[0] = regs->iaoq[0];
	kcb->iaoq[1] = regs->iaoq[1];
	regs->iaoq[0] = (unsigned long)p->ainsn.insn;
	regs->iaoq[1] = (unsigned long)p->ainsn.insn + 4;
	/* arm the recovery counter: clear CR0 and set the PSW R bit so a
	 * recovery counter trap is raised once the copied instruction has
	 * executed (pt_regs keeps the PSW in gr[0]).
	 */
	mtctl(0, 0);
	regs->gr[0] |= PSW_R;
}

int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)regs->iaoq[0]);

	if (!p) {
		preempt_enable_no_resched();
		return 0;
	}

	if (kprobe_running()) {
		/*
		 * We have re-entered the kprobe handler because another
		 * kprobe was hit while within a handler. Save the original
		 * kprobe, then single-step the instruction of the new probe
		 * without calling any user handlers, to avoid recursive
		 * kprobes.
		 */
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, kcb, regs);
		kcb->kprobe_status = KPROBE_REENTER;
		return 1;
	}

	set_current_kprobe(p);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* If we have no pre-handler, or it returned 0, continue with normal
	 * processing. If the pre-handler returned non-zero - meaning the
	 * user handler set up the registers to exit to another instruction -
	 * we must skip the single stepping.
	 */

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		setup_singlestep(p, kcb, regs);
		kcb->kprobe_status = KPROBE_HIT_SS;
	} else {
		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 1;
}
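/*
 * Handle the trap raised while single-stepping the copied instruction:
 * setup_singlestep() above armed the recovery counter, so once the copy
 * in ainsn.insn has executed, the CPU raises a recovery counter trap and
 * we land here with iaoq_f pointing just behind the copied slot.
 */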
int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (regs->iaoq[0] != (unsigned long)p->ainsn.insn + 4)
		return 0;

	/* restore the original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		preempt_enable_no_resched();
		return 1;
	}

	/* for absolute branch instructions we can copy iaoq_b. for relative
	 * branch instructions we need to calculate the new address based on
	 * the difference between iaoq_f and iaoq_b. We cannot use iaoq_b
	 * without modification because it's based on our ainsn.insn address.
	 */

	if (p->post_handler)
		p->post_handler(p, regs, 0);

	switch (regs->iir >> 26) {
	case 0x38: /* BE */
	case 0x39: /* BE,L */
	case 0x3a: /* BV */
	case 0x3b: /* BVE */
		/* for absolute branches, regs->iaoq[1] has already the right
		 * address
		 */
		regs->iaoq[0] = kcb->iaoq[1];
		break;
	default:
		/* relative branches and ordinary instructions: rebase iaoq_b
		 * from the copied slot back onto the original instruction
		 * address before reloading iaoq_f.
		 */
		regs->iaoq[1] += kcb->iaoq[0] - (unsigned long)p->ainsn.insn;
		regs->iaoq[0] = kcb->iaoq[1];
		break;
	}
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	reset_current_kprobe();
	preempt_enable_no_resched();
	return 1;
}

static inline void kretprobe_trampoline(void)
{
	asm volatile("nop");
	asm volatile("nop");
}

static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs);

static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};
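/*
 * Return probes: arch_prepare_kretprobe() below hijacks the return
 * pointer in gr[2], so the probed function "returns" into
 * kretprobe_trampoline(), hits the kprobe BREAK that arch_init_kprobes()
 * patched onto its first NOP, and traps into this handler, which looks
 * up the saved real return address and redirects execution there.
 */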
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)trampoline_p.addr;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	instruction_pointer_set(regs, orig_ret_address);
	return 1;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->gr[2];

	/* Replace the return addr with trampoline addr. */
	regs->gr[2] = (unsigned long)trampoline_p.addr;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == trampoline_p.addr;
}

bool arch_kprobe_on_func_entry(unsigned long offset)
{
	return !offset;
}

int __init arch_init_kprobes(void)
{
	trampoline_p.addr = (kprobe_opcode_t *)
		dereference_function_descriptor(kretprobe_trampoline);
	return register_kprobe(&trampoline_p);
}
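/*
 * Everything above plugs into the generic kprobes API; e.g. a module
 * could attach a return probe like this (sketch only - the symbol and
 * handler names are just examples):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("%s returned\n", ri->rp->kp.symbol_name);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "dput",
 *		.handler	= my_ret_handler,
 *	};
 *	...
 *	register_kretprobe(&my_rp);
 *
 * arch_prepare_kretprobe() then swaps gr[2] for the trampoline address
 * on each entry to the probed function.
 */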