// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/execmem.h>
#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/irq.h>
#include <asm/text-patching.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

void *alloc_insn_page(void)
{
	void *addr;

	addr = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
	if (!addr)
		return NULL;
	if (set_memory_rox((unsigned long)addr, 1)) {
		execmem_free(addr);
		return NULL;
	}
	return addr;
}

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->ainsn.xol_insn;

	/*
	 * Prepare insn slot. Mark Rutland points out it depends on a couple of
	 * subtleties:
	 *
	 * - That the I-cache maintenance for these instructions is complete
	 *   *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync()
	 *   ensures this, but just omits causing a Context-Synchronization-Event
	 *   on all CPUs).
	 *
	 * - That the kprobe BRK results in an exception (and consequently a
	 *   Context-Synchronization-Event), which ensures that the CPU will
	 *   fetch the single-step slot instructions *after* this, ensuring that
	 *   the new instructions are used.
	 *
	 * Normally an ISB would be placed after patching to guarantee that the
	 * I-cache maintenance is observed on all CPUs; however, the single-step
	 * slot is only installed from the BRK exception handler, so it is
	 * unnecessary to generate a Context-Synchronization-Event via ISB again.
	 */
	aarch64_insn_patch_text_nosync(addr, le32_to_cpu(p->opcode));
	aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);

	/*
	 * Needs restoring of return address after stepping xol.
	 */
	p->ainsn.xol_restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}
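
/*
 * Illustrative sketch of the single-step (XOL) slot layout set up above; a
 * summary for orientation, not additional machinery:
 *
 *	xol_insn[0] = <copy of the probed instruction>
 *	xol_insn[1] = BRK64_OPCODE_KPROBES_SS	(traps back into the kernel)
 *
 * After the copied instruction executes, the BRK raises an exception and
 * kprobe_ss_brk_handler() resumes at xol_restore, i.e. the instruction
 * immediately following the probed address (p->addr + 4 on arm64).
 */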

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out of line, so no need to adjust the PC */
	p->ainsn.xol_restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler(le32_to_cpu(p->opcode), (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(p, kcb, regs);
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = *p->addr;

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.xol_insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.xol_insn = get_insn_slot();
		if (!p->ainsn.xol_insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.xol_insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = BRK64_OPCODE_KPROBES;

	aarch64_insn_patch_text(&addr, &insn, 1);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = le32_to_cpu(p->opcode);

	aarch64_insn_patch_text(&addr, &insn, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.xol_insn) {
		free_insn_slot(p->ainsn.xol_insn, 0);
		p->ainsn.xol_insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
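
/*
 * High-level flow for a slot-based (non-simulated) probe hit, summarising the
 * logic implemented below in this file:
 *
 *	1. Execution of the probed address hits BRK64_OPCODE_KPROBES and lands
 *	   in kprobe_brk_handler().
 *	2. After the pre-handler runs, setup_singlestep() masks DAIF via
 *	   kprobes_save_local_irqflag() and points the PC at the XOL slot.
 *	3. The copied instruction executes from the slot, then
 *	   BRK64_OPCODE_KPROBES_SS traps into kprobe_ss_brk_handler().
 *	4. kprobes_restore_local_irqflag() unmasks DAIF and
 *	   post_kprobe_handler() restores the PC to xol_restore, runs any post
 *	   handler and clears the current kprobe.
 */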

/*
 * Mask all of DAIF while executing the instruction out-of-line, to keep things
 * simple and avoid nesting exceptions. Interrupts do have to be disabled since
 * the kprobe state is per-CPU and doesn't get migrated.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	regs->pstate |= DAIF_MASK;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	regs->pstate &= ~DAIF_MASK;
	regs->pstate |= kcb->saved_irqflag;
}

static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.xol_insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.xol_insn;

		kprobes_save_local_irqflag(kcb, regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address if this was a non-branching insn */
	if (cur->ainsn.xol_restore != 0)
		instruction_pointer_set(regs, cur->ainsn.xol_restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)
		cur->post_handler(cur, regs, 0);

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back at the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	}
	return 0;
}
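
/*
 * Entry point from the BRK64_OPCODE_KPROBES breakpoint exception: look up the
 * probe registered at the faulting PC, handle reentrancy if another probe is
 * already active, run the pre-handler and then either single-step/simulate
 * the displaced instruction or, if the pre-handler changed the execution path
 * (returned non-zero), simply resume.
 */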
int __kprobes
kprobe_brk_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);
	if (WARN_ON_ONCE(!p)) {
		/*
		 * Something went wrong. This BRK used an immediate reserved
		 * for kprobes, but we couldn't find any corresponding probe.
		 */
		return DBG_HOOK_ERROR;
	}

	if (cur_kprobe) {
		/* Hit a kprobe inside another kprobe */
		if (!reenter_kprobe(p, regs, kcb))
			return DBG_HOOK_ERROR;
	} else {
		/* Probe hit */
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/*
		 * If we have no pre-handler or it returned 0, we
		 * continue with normal processing. If we have a
		 * pre-handler and it returned non-zero, it will have
		 * modified the execution path and there is no need to
		 * single-step. Let's just reset the current kprobe and exit.
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs))
			setup_singlestep(p, regs, kcb, 0);
		else
			reset_current_kprobe();
	}

	return DBG_HOOK_HANDLED;
}

int __kprobes
kprobe_ss_brk_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.xol_insn[1] == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);

		return DBG_HOOK_HANDLED;
	}

	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

int __kprobes
kretprobe_brk_handler(struct pt_regs *regs, unsigned long esr)
{
	if (regs->pc != (unsigned long)__kretprobe_trampoline)
		return DBG_HOOK_ERROR;

	regs->pc = kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
	return DBG_HOOK_HANDLED;
}

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
	ri->fp = (void *)regs->regs[29];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}
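
/*
 * Illustrative sketch (not part of this file) of how a module exercises the
 * machinery above via the generic kprobes API; the probe target and handler
 * name below are hypothetical:
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre-handler: pc = 0x%lx\n", instruction_pointer(regs));
 *		return 0;	// 0: continue and single-step the probed insn
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_sys_openat2",
 *		.pre_handler	= handler_pre,
 *	};
 *
 * register_kprobe(&kp) ends up in arch_prepare_kprobe() and
 * arch_arm_kprobe() above; unregister_kprobe(&kp) undoes it.
 */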