// SPDX-License-Identifier: GPL-2.0+
/*
 * Kernel Probes (KProbes)
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/execmem.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
#include "entry.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

void *alloc_insn_page(void)
{
	void *page;

	page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
	if (!page)
		return NULL;
	set_memory_rox((unsigned long)page, 1);
	return page;
}

static void copy_instruction(struct kprobe *p)
{
	kprobe_opcode_t insn[MAX_INSN_SIZE];
	s64 disp, new_disp;
	u64 addr, new_addr;
	unsigned int len;

	len = insn_length(*p->addr >> 8);
	memcpy(&insn, p->addr, len);
	p->opcode = insn[0];
	if (probe_is_insn_relative_long(&insn[0])) {
		/*
		 * For pc-relative instructions in RIL-b or RIL-c format,
		 * patch the RI2 displacement field. The insn slot for the
		 * patched instruction lies within the same 4GB area as the
		 * original instruction, therefore the new displacement
		 * always fits.
		 */
		disp = *(s32 *)&insn[1];
		addr = (u64)(unsigned long)p->addr;
		new_addr = (u64)(unsigned long)p->ainsn.insn;
		new_disp = ((addr + (disp * 2)) - new_addr) / 2;
		*(s32 *)&insn[1] = new_disp;
	}
	s390_kernel_write(p->ainsn.insn, &insn, len);
}
NOKPROBE_SYMBOL(copy_instruction);

/* Check if paddr is at an instruction boundary */
static bool can_probe(unsigned long paddr)
{
	unsigned long addr, offset = 0;
	kprobe_opcode_t insn;
	struct kprobe *kp;

	if (paddr & 0x01)
		return false;

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return false;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(insn)))
			return false;

		if (insn >> 8 == 0) {
			if (insn != BREAKPOINT_INSTRUCTION) {
				/*
				 * Note that QEMU inserts opcode 0x0000 to implement
				 * software breakpoints for guests. Since the size of
				 * the original instruction is unknown, stop following
				 * instructions and prevent setting a kprobe.
				 */
				return false;
			}
			/*
			 * Check if the instruction has been modified by another
			 * kprobe, in which case the original instruction is
			 * decoded.
			 */
			kp = get_kprobe((void *)addr);
			if (!kp) {
				/* not a kprobe */
				return false;
			}
			insn = kp->opcode;
		}
		addr += insn_length(insn >> 8);
	}
	return addr == paddr;
}
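
/*
 * Worked example for the displacement rewrite in copy_instruction()
 * above; all addresses are hypothetical. Assume a RIL-b instruction
 * such as brasl at p->addr = 0x1000 with RI2 = 0x100 (halfwords),
 * i.e. target = 0x1000 + 0x100 * 2 = 0x1200, and an insn slot at
 * p->ainsn.insn = 0x3000:
 *
 *	new_disp = ((0x1000 + 0x100 * 2) - 0x3000) / 2 = -0xf00
 *
 * Executing the copy in the slot then resolves to
 * 0x3000 + (-0xf00) * 2 = 0x1200, the original branch target.
 */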

int arch_prepare_kprobe(struct kprobe *p)
{
	if (!can_probe((unsigned long)p->addr))
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
	struct swap_insn_args *args = data;
	struct kprobe *p = args->p;
	u16 opc;

	opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	s390_kernel_write(p->addr, &opc, sizeof(opc));
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	union {
		struct ctlreg regs[3];
		struct {
			struct ctlreg control;
			struct ctlreg start;
			struct ctlreg end;
		};
	} per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control.val = PER_EVENT_IFETCH;
	per_kprobe.start.val = ip;
	per_kprobe.end.val = ip;

	/* Save control regs and psw mask */
	__local_ctl_store(9, 11, kcb->kprobe_saved_ctl);
	kcb->kprobe_saved_imask = regs->psw.mask &
				  (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__local_ctl_load(9, 11, per_kprobe.regs);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__local_ctl_load(9, 11, kcb->kprobe_saved_ctl);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);
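
/*
 * Hypothetical trace of the two-deep kprobe "stack" implemented by
 * push_kprobe()/pop_kprobe():
 *
 *	hit probe A:	prev_kprobe.kp = NULL, current_kprobe = A
 *	hit probe B	prev_kprobe.kp = A,    current_kprobe = B
 *	(from A's handler, status KPROBE_REENTER)
 *	pop_kprobe():	current_kprobe = A,    prev_kprobe.kp = NULL
 *	pop_kprobe():	current_kprobe = NULL
 *
 * There is no room for a third level: kprobe_reenter_check() below
 * treats deeper reentry as a BUG.
 */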

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->prev_kprobe.kp = NULL;
}
NOKPROBE_SYMBOL(pop_kprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		pr_err("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	p = get_kprobe((void *)(regs->psw.addr - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for changing execution path, so get out doing
			 * nothing more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs)) {
				pop_kprobe(kcb);
				preempt_enable_no_resched();
				return 1;
			}
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);
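
/*
 * Minimal usage sketch showing how the handlers above are reached
 * (module code, not part of this file; the target symbol is only an
 * example). Note that kprobe_handler() looks up psw.addr - 2 because
 * the PSW has already advanced past the 2-byte breakpoint when the
 * trap is delivered.
 *
 *	#include <linux/module.h>
 *	#include <linux/kprobes.h>
 *
 *	static int demo_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS\n", p->addr);
 *		return 0;	// 0: continue with single stepping
 *	}
 *
 *	static struct kprobe demo_kp = {
 *		.symbol_name = "do_sys_open",	// example target
 *		.pre_handler = demo_pre,
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return register_kprobe(&demo_kp);
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		unregister_kprobe(&demo_kp);
 *	}
 *
 *	module_init(demo_init);
 *	module_exit(demo_exit);
 *	MODULE_LICENSE("GPL");
 */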

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);

		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;

		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);

static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	resume_execution(p, regs);
	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, the
	 * psw mask will have PER set, in which case continue the remaining
	 * processing of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw address back to the probe
		 * address and allow the page fault handler to continue
		 * as a normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * The fault happened while a probe handler was
		 * executing; try to fix it up via the exception table.
		 */
		if (fixup_exception(regs))
			return 1;
		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
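
/*
 * Worked example for the fixups in resume_execution() above, with
 * hypothetical addresses: p->addr = 0x1000, p->ainsn.insn = 0x3000,
 * probed instruction length ilen = 6.
 *
 *	FIXUP_PSW_NORMAL:	after the step psw.addr = 0x3006;
 *				ip += 0x1000 - 0x3000, so ip = 0x1006
 *	FIXUP_BRANCH_NOT_TAKEN:	a not-taken branch also stops at
 *				slot + ilen and is mapped to
 *				p->addr + ilen = 0x1006; a taken branch
 *				already computed the real target address
 *				(relative targets were re-biased by
 *				copy_instruction()) and is left alone
 *	FIXUP_RETURN_REGISTER:	e.g. brasl %r14,... saved 0x3006 in
 *				%r14; adding p->addr - p->ainsn.insn
 *				corrects it to 0x1006
 */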

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
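
/*
 * The empty kretprobe_blacklist[] at the top of this file means no
 * functions are exempted from return probes on s390. A minimal
 * kretprobe usage sketch (module code, not part of this file; the
 * target symbol is only an example):
 *
 *	static int demo_ret(struct kretprobe_instance *ri,
 *			    struct pt_regs *regs)
 *	{
 *		pr_info("retval = %lu\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe demo_rp = {
 *		.kp.symbol_name = "dup_mm",	// example target
 *		.handler = demo_ret,
 *		.maxactive = 20,
 *	};
 *
 *	// pair register_kretprobe(&demo_rp) in module init with
 *	// unregister_kretprobe(&demo_rp) in module exit
 */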