// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/vmalloc.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/bug.h>
#include <asm/patch.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	size_t len = GET_INSN_LENGTH(p->opcode);
	u32 insn = __BUG_INSN_32;

	p->ainsn.api.restore = (unsigned long)p->addr + len;

	patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
	patch_text_nosync(p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode,
				     (unsigned long)p->addr, regs);

	post_kprobe_handler(p, kcb, regs);
}

/*
 * Walk forward from the start of the probed function to verify that
 * p->addr falls on an instruction boundary; with the C extension,
 * RISC-V text mixes 2-byte and 4-byte instructions.
 */
static bool __kprobes arch_check_kprobe(struct kprobe *p)
{
	unsigned long tmp = (unsigned long)p->addr - p->offset;
	unsigned long addr = (unsigned long)p->addr;

	while (tmp <= addr) {
		if (tmp == addr)
			return true;

		tmp += GET_INSN_LENGTH(*(u16 *)tmp);
	}

	return false;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	u16 *insn = (u16 *)p->addr;

	if ((unsigned long)insn & 0x1)
		return -EILSEQ;

	if (!arch_check_kprobe(p))
		return -EILSEQ;

	/* copy instruction */
	p->opcode = (kprobe_opcode_t)(*insn++);
	if (GET_INSN_LENGTH(p->opcode) == 4)
		p->opcode |= (kprobe_opcode_t)(*insn) << 16;

	/* decode instruction */
	switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
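/*
 * Illustrative sketch, not part of this file: a minimal user of the
 * generic kprobes API, which ends up in arch_prepare_kprobe() above.
 * The probed symbol and handler names below are examples only.
 *
 *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at %ps\n", p->addr);
 *		return 0;	// 0: fall through to single-step/simulation
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "do_sys_open",	// example target
 *		.pre_handler	= example_pre,
 *	};
 *
 *	// from a module init function:
 *	// int ret = register_kprobe(&example_kp);
 */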
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	size_t len = GET_INSN_LENGTH(p->opcode);
	u32 insn = len == 4 ? __BUG_INSN_32 : __BUG_INSN_16;

	patch_text(p->addr, &insn, len);
}

/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	size_t len = GET_INSN_LENGTH(p->opcode);

	patch_text(p->addr, &p->opcode, len);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends. If interrupts are left
 * enabled on the local CPU, one may fire between the exception return
 * and the start of the out-of-line single step, wrongly single-stepping
 * into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_status = regs->status;
	regs->status &= ~SR_SPIE;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	regs->status = kcb->saved_status;
}

static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);

		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
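/*
 * Descriptive sketch of the out-of-line slot built by
 * arch_prepare_ss_slot() (added for clarity; names are as used in
 * this file):
 *
 *	slot + 0:	copy of the probed instruction (2 or 4 bytes)
 *	slot + len:	__BUG_INSN_32 (a 32-bit ebreak)
 *
 * setup_singlestep() points epc at the slot with SR_SPIE cleared, so
 * the copied instruction runs with interrupts masked; the trailing
 * ebreak then traps into kprobe_single_step_handler() below, which
 * calls post_kprobe_handler() to restore epc from ainsn.api.restore.
 */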
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address if it was a non-branching insn */
	if (cur->ainsn.api.restore != 0)
		regs->epc = cur->ainsn.api.restore;

	/* restore the original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * post_handler can hit a breakpoint and single step
		 * again, which is handled as a recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back at the probe address,
		 * and let the page fault handler continue as a
		 * normal page fault.
		 */
		regs->epc = (unsigned long) cur->addr;
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

bool __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return true;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it will
			 * have modified the execution path, so there is no
			 * need for single-stepping; just reset the current
			 * kprobe and exit.
			 *
			 * pre_handler can itself hit a breakpoint and
			 * single step through before returning.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return true;
	}

	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return to the original instruction and continue.
	 */
	return false;
}

bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);
		return true;
	}
	/* not ours, kprobes should ignore it */
	return false;
}
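/*
 * Summary of how the handlers above cooperate (descriptive, added for
 * clarity): kprobe_breakpoint_handler() fields the ebreak planted by
 * arch_arm_kprobe() and either simulates the probed instruction or
 * diverts execution to the out-of-line slot; kprobe_single_step_handler()
 * fields the trap at the end of that slot; kprobe_fault_handler()
 * unwinds state if the stepped instruction faults instead.
 */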
351 */ 352 int __init arch_populate_kprobe_blacklist(void) 353 { 354 int ret; 355 356 ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, 357 (unsigned long)__irqentry_text_end); 358 return ret; 359 } 360 361 int __kprobes arch_trampoline_kprobe(struct kprobe *p) 362 { 363 return 0; 364 } 365 366 int __init arch_init_kprobes(void) 367 { 368 return 0; 369 } 370