// SPDX-License-Identifier: GPL-2.0+
/*
 * Kernel Probes (KProbes)
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(s390_insn);

static int insn_page_in_use;
static char insn_page[PAGE_SIZE] __aligned(PAGE_SIZE);

static void *alloc_s390_insn_page(void)
{
	if (xchg(&insn_page_in_use, 1) == 1)
		return NULL;
	set_memory_x((unsigned long) &insn_page, 1);
	return &insn_page;
}

static void free_s390_insn_page(void *page)
{
	set_memory_nx((unsigned long) page, 1);
	xchg(&insn_page_in_use, 0);
}

struct kprobe_insn_cache kprobe_s390_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
	.alloc = alloc_s390_insn_page,
	.free = free_s390_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};

static void copy_instruction(struct kprobe *p)
{
	s64 disp, new_disp;
	u64 addr, new_addr;

	memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
	p->opcode = p->ainsn.insn[0];
	if (!probe_is_insn_relative_long(p->ainsn.insn))
		return;
	/*
	 * For pc-relative instructions in RIL-b or RIL-c format patch the
	 * RI2 displacement field. We have already made sure that the insn
	 * slot for the patched instruction is within the same 2GB area
	 * as the original instruction (either kernel image or module area).
	 * Therefore the new displacement will always fit.
	 */
	disp = *(s32 *)&p->ainsn.insn[1];
	addr = (u64)(unsigned long)p->addr;
	new_addr = (u64)(unsigned long)p->ainsn.insn;
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);
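
/*
 * Worked example for the displacement rewrite in copy_instruction(),
 * using purely hypothetical addresses: a RIL instruction at 0x1000 with
 * RI2 = 0x100 targets 0x1000 + 2 * 0x100 = 0x1200.  If the copy in the
 * insn slot sits at 0x3000, the rewritten displacement becomes
 * (0x1200 - 0x3000) / 2 = -0xf00, so the out-of-line copy still refers
 * to 0x1200 when it is single stepped.
 */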

static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_s390_insn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_s390_insn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
	struct swap_insn_args *args = data;
	struct kprobe *p = args->p;
	u16 opc;

	opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	s390_kernel_write(p->addr, &opc, sizeof(opc));
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);
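
/*
 * Note on the mechanism implemented by enable_singlestep() and
 * disable_singlestep() above: PER (Program Event Recording) is set up
 * for instruction-fetch events with start == end == ip, so exactly one
 * event is raised when the copied instruction at ip is executed.  I/O
 * and external interrupts are masked in the PSW for the duration of the
 * step and restored from kprobe_saved_imask afterwards.
 */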

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER, prev_kprobe.kp will be non-NULL;
 * for any other state it will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		pr_err("Invalid kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	p = get_kprobe((void *)(regs->psw.addr - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for changing execution path, so get out doing
			 * nothing more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs)) {
				pop_kprobe(kcb);
				preempt_enable_no_resched();
				return 1;
			}
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);
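
/*
 * Note for kprobe_handler() above: the breakpoint written by
 * swap_instruction() is a two byte opcode (opc is a u16 there), and the
 * program check leaves psw.addr pointing behind it, which is why the
 * kprobe is looked up at psw.addr - 2.
 */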

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	ri = NULL;
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->psw.addr = orig_ret_address;

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
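
/*
 * Example for the instance handling in trampoline_probe_handler(), with
 * hypothetical functions: if foo() and bar() both carry return probes
 * and foo() calls bar(), this task's list holds bar()'s instance at the
 * head (its ret_addr is the real return address inside foo()), followed
 * by foo()'s instance.  A trampoline hit for bar() therefore only
 * consumes instances up to and including the first one whose ret_addr
 * is not the trampoline address; foo()'s instance stays queued until
 * foo() itself returns.
 */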

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first two bytes have been replaced by the
 * "breakpoint" instruction. To avoid the SMP problems that can occur
 * when we temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);

static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, let the psw address point back to the probe
		 * address and allow the page fault handler to continue
		 * as a normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen if
		 * the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = s390_search_extables(regs->psw.addr);
		if (entry) {
			regs->psw.addr = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);
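
/*
 * kprobe_fault_handler() and kprobe_exceptions_notify() below run in the
 * context of the faulting or trapping code. If that context still had
 * I/O and external interrupts enabled in its PSW mask, they are disabled
 * around the kprobe processing and restored afterwards from the saved
 * PSW mask with PER masked out, so that the restore does not re-enable
 * single stepping.
 */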

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);