// SPDX-License-Identifier: GPL-2.0+
/*
 * Kernel Probes (KProbes)
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/execmem.h>
#include <asm/text-patching.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
#include "entry.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

void *alloc_insn_page(void)
{
	void *page;

	page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
	if (!page)
		return NULL;
	set_memory_rox((unsigned long)page, 1);
	return page;
}

static void copy_instruction(struct kprobe *p)
{
	kprobe_opcode_t insn[MAX_INSN_SIZE];
	s64 disp, new_disp;
	u64 addr, new_addr;
	unsigned int len;

	len = insn_length(*p->addr >> 8);
	memcpy(&insn, p->addr, len);
	p->opcode = insn[0];
	if (probe_is_insn_relative_long(&insn[0])) {
		/*
		 * For pc-relative instructions in RIL-b or RIL-c format patch
		 * the RI2 displacement field. The insn slot for the
		 * instruction to be patched lies within the same 4GB area
		 * as the original instruction, therefore the new displacement
		 * always fits.
		 */
		disp = *(s32 *)&insn[1];
		addr = (u64)(unsigned long)p->addr;
		new_addr = (u64)(unsigned long)p->ainsn.insn;
		new_disp = ((addr + (disp * 2)) - new_addr) / 2;
		*(s32 *)&insn[1] = new_disp;
	}
	s390_kernel_write(p->ainsn.insn, &insn, len);
}
NOKPROBE_SYMBOL(copy_instruction);
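
/*
 * Worked example for the displacement rewrite above, with hypothetical
 * addresses: a "larl %r1,x" (RIL-b format) at 0x1000 with RI2 = 0x100
 * refers to x = 0x1000 + 0x100 * 2 = 0x1200, since RI2 counts halfwords.
 * If the insn slot is at 0x3000, the copy gets
 * RI2 = (0x1200 - 0x3000) / 2 = -0xf00, so single-stepping the copy
 * still computes the address 0x1200.
 */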

/* Check if paddr is at an instruction boundary */
static bool can_probe(unsigned long paddr)
{
	unsigned long addr, offset = 0;
	kprobe_opcode_t insn;
	struct kprobe *kp;

	if (paddr & 0x01)
		return false;

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return false;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(insn)))
			return false;

		if (insn >> 8 == 0) {
			if (insn != BREAKPOINT_INSTRUCTION) {
				/*
				 * Note that QEMU inserts opcode 0x0000 to implement
				 * software breakpoints for guests. Since the size of
				 * the original instruction is unknown, stop following
				 * instructions and prevent setting a kprobe.
				 */
				return false;
			}
			/*
			 * Check if the instruction has been modified by another
			 * kprobe, in which case the original instruction is
			 * decoded.
			 */
			kp = get_kprobe((void *)addr);
			if (!kp) {
				/* not a kprobe */
				return false;
			}
			insn = kp->opcode;
		}
		addr += insn_length(insn >> 8);
	}
	return addr == paddr;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	if (!can_probe((unsigned long)p->addr))
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
	struct swap_insn_args *args = data;
	struct kprobe *p = args->p;
	u16 opc;

	opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	s390_kernel_write(p->addr, &opc, sizeof(opc));
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	if (MACHINE_HAS_SEQ_INSN) {
		swap_instruction(&args);
		text_poke_sync();
	} else {
		stop_machine_cpuslocked(swap_instruction, &args, NULL);
	}
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	if (MACHINE_HAS_SEQ_INSN) {
		swap_instruction(&args);
		text_poke_sync();
	} else {
		stop_machine_cpuslocked(swap_instruction, &args, NULL);
	}
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
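
/*
 * Note on the patching strategy above (interpretation, not taken from this
 * file): arming overwrites the first two bytes of the probed instruction
 * with BREAKPOINT_INSTRUCTION via a single u16 store. On machines where
 * MACHINE_HAS_SEQ_INSN is set, the store plus text_poke_sync() is assumed
 * to be sufficient for concurrently executing CPUs; otherwise
 * stop_machine_cpuslocked() keeps all other CPUs out of the instruction
 * stream while the opcode is swapped.
 */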

void arch_remove_kprobe(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	union {
		struct ctlreg regs[3];
		struct {
			struct ctlreg control;
			struct ctlreg start;
			struct ctlreg end;
		};
	} per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control.val = PER_EVENT_IFETCH;
	per_kprobe.start.val = ip;
	per_kprobe.end.val = ip;

	/* Save control regs and psw mask */
	__local_ctl_store(9, 11, kcb->kprobe_saved_ctl);
	kcb->kprobe_saved_imask = regs->psw.mask &
				  (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__local_ctl_load(9, 11, per_kprobe.regs);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__local_ctl_load(9, 11, kcb->kprobe_saved_ctl);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);
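
/*
 * Sketch of the PER based single-step flow (summary, assuming the usual
 * s390 program-interruption behaviour): enable_singlestep() programs an
 * instruction-fetch PER event with start == end == ip, so only a fetch
 * from the insn slot matches. The CPU executes the copied instruction,
 * recognizes the PER event, and raises a program interruption, which
 * reaches post_kprobe_handler() below via DIE_SSTEP. I/O and external
 * interrupts are masked off meanwhile so nothing else runs in between.
 */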

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->prev_kprobe.kp = NULL;
}
NOKPROBE_SYMBOL(pop_kprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		pr_err("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	p = get_kprobe((void *)(regs->psw.addr - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for changing execution path, so get out doing
			 * nothing more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs)) {
				pop_kprobe(kcb);
				preempt_enable_no_resched();
				return 1;
			}
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);

		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;

		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
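
/*
 * Fixup example (hypothetical addresses): assume a 4-byte conditional
 * branch copied from p->addr = 0x1000 to the insn slot at 0x3000. If the
 * branch was not taken, the PSW points right behind the copy (0x3004);
 * FIXUP_BRANCH_NOT_TAKEN rewrites that to 0x1004, directly behind the
 * original. FIXUP_PSW_NORMAL applies the same slot-to-original offset
 * unconditionally, and FIXUP_RETURN_REGISTER corrects a return address
 * that a branch-and-save style instruction stored relative to the slot.
 */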

static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	resume_execution(p, regs);
	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single stepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the psw address points back to the probe
		 * address, so the page fault handler can continue as
		 * with a normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
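
/*
 * Usage sketch (not part of this file): the arch hooks above are driven
 * by the generic kprobes API; the probed symbol below is illustrative.
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit at %px\n", p->addr);
 *		return 0;	// 0: continue with single-stepping
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "kernel_clone",
 *		.pre_handler = pre,
 *	};
 *
 *	// register_kprobe() ends up in arch_prepare_kprobe() and
 *	// arch_arm_kprobe() above; unregister_kprobe() disarms again.
 *	register_kprobe(&kp);
 */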