// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/execmem.h>
#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/irq.h>
#include <asm/text-patching.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

void *alloc_insn_page(void)
{
	void *addr;

	addr = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
	if (!addr)
		return NULL;
	set_memory_rox((unsigned long)addr, 1);
	return addr;
}

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->ainsn.xol_insn;

	/*
	 * Prepare the insn slot. As Mark Rutland points out, this depends on a
	 * couple of subtleties:
	 *
	 * - That the I-cache maintenance for these instructions is complete
	 *   *before* the kprobe BRK is written (and aarch64_insn_patch_text_nosync()
	 *   ensures this, but just omits causing a Context-Synchronization-Event
	 *   on all CPUs).
	 *
	 * - That the kprobe BRK results in an exception (and consequently a
	 *   Context-Synchronization-Event), which ensures that the CPU will
	 *   fetch the single-step slot instructions *after* this, ensuring that
	 *   the new instructions are used.
	 *
	 * An ISB could be placed after patching to guarantee that the I-cache
	 * maintenance is observed on all CPUs; however, the single-step slot is
	 * only ever entered via the BRK exception handler, so it is unnecessary
	 * to generate a Context-Synchronization-Event via ISB again.
	 */
	aarch64_insn_patch_text_nosync(addr, le32_to_cpu(p->opcode));
	aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS);

	/*
	 * The return address needs to be restored after stepping xol.
	 */
	p->ainsn.xol_restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed xol. No need to adjust the PC. */
	p->ainsn.xol_restore = 0;
}
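
/*
 * Simulate the probed instruction in the kernel (no out-of-line slot is
 * used), then go straight to post-processing since there is no single-step
 * BRK to wait for.
 */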
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler(le32_to_cpu(p->opcode), (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(p, kcb, regs);
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = *p->addr;

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.xol_insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.xol_insn = get_insn_slot();
		if (!p->ainsn.xol_insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.xol_insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = BRK64_OPCODE_KPROBES;

	aarch64_insn_patch_text(&addr, &insn, 1);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 insn = le32_to_cpu(p->opcode);

	aarch64_insn_patch_text(&addr, &insn, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.xol_insn) {
		free_insn_slot(p->ainsn.xol_insn, 0);
		p->ainsn.xol_insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Mask all of DAIF while executing the instruction out-of-line, to keep things
 * simple and avoid nesting exceptions. Interrupts do have to be disabled since
 * the kprobe state is per-CPU and doesn't get migrated.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	regs->pstate |= DAIF_MASK;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->pstate &= ~DAIF_MASK;
	regs->pstate |= kcb->saved_irqflag;
}
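
/*
 * Set up execution of the probed instruction: point the PC at the out-of-line
 * slot (with DAIF masked) when one exists, otherwise simulate the instruction
 * directly.
 */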
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.xol_insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.xol_insn;

		kprobes_save_local_irqflag(kcb, regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	/* restore the return address if this was a non-branching insn */
	if (cur->ainsn.xol_restore != 0)
		instruction_pointer_set(regs, cur->ainsn.xol_restore);

	/* restore the previously saved kprobe state and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)
		cur->post_handler(cur, regs, 0);

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back at the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	}
	return 0;
}
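
/*
 * BRK handler for the probe breakpoint: look up the kprobe for this address,
 * handle reentrant hits, run the pre-handler and, unless it diverted
 * execution, continue with single-stepping or simulation.
 */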
int __kprobes
kprobe_brk_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);
	if (WARN_ON_ONCE(!p)) {
		/*
		 * Something went wrong. This BRK used an immediate reserved
		 * for kprobes, but we couldn't find any corresponding probe.
		 */
		return DBG_HOOK_ERROR;
	}

	if (cur_kprobe) {
		/* Hit a kprobe inside another kprobe */
		if (!reenter_kprobe(p, regs, kcb))
			return DBG_HOOK_ERROR;
	} else {
		/* Probe hit */
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/*
		 * If we have no pre-handler or it returned 0, we
		 * continue with normal processing. If we have a
		 * pre-handler and it returned non-zero, it will have
		 * modified the execution path and does not need to
		 * single-step; just reset the current kprobe and exit.
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs))
			setup_singlestep(p, regs, kcb, 0);
		else
			reset_current_kprobe();
	}

	return DBG_HOOK_HANDLED;
}

int __kprobes
kprobe_ss_brk_handler(struct pt_regs *regs, unsigned long esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);
	struct kprobe *cur = kprobe_running();

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.xol_insn[1] == addr)) {
		kprobes_restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);

		return DBG_HOOK_HANDLED;
	}

	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

int __kprobes
kretprobe_brk_handler(struct pt_regs *regs, unsigned long esr)
{
	if (regs->pc != (unsigned long)__kretprobe_trampoline)
		return DBG_HOOK_ERROR;

	regs->pc = kretprobe_trampoline_handler(regs, (void *)regs->regs[29]);
	return DBG_HOOK_HANDLED;
}

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
	ri->fp = (void *)regs->regs[29];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}