// SPDX-License-Identifier: GPL-2.0+
/*
 * Kernel Probes (KProbes)
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>
#include "entry.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(s390_insn);

static int insn_page_in_use;

void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;
	__set_memory((unsigned long) page, 1, SET_MEMORY_RO | SET_MEMORY_X);
	return page;
}

static void *alloc_s390_insn_page(void)
{
	if (xchg(&insn_page_in_use, 1) == 1)
		return NULL;
	return &kprobes_insn_page;
}

static void free_s390_insn_page(void *page)
{
	xchg(&insn_page_in_use, 0);
}

struct kprobe_insn_cache kprobe_s390_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
	.alloc = alloc_s390_insn_page,
	.free = free_s390_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};

static void copy_instruction(struct kprobe *p)
{
	kprobe_opcode_t insn[MAX_INSN_SIZE];
	s64 disp, new_disp;
	u64 addr, new_addr;
	unsigned int len;

	/* Instruction length is encoded in the first two opcode bits */
	len = insn_length(*p->addr >> 8);
	memcpy(&insn, p->addr, len);
	p->opcode = insn[0];
	if (probe_is_insn_relative_long(&insn[0])) {
		/*
		 * For pc-relative instructions in RIL-b or RIL-c format patch
		 * the RI2 displacement field. We have already made sure that
		 * the insn slot for the patched instruction is within the same
		 * 2GB area as the original instruction (either kernel image or
		 * module area). Therefore the new displacement will always fit.
		 */
		disp = *(s32 *)&insn[1];
		addr = (u64)(unsigned long)p->addr;
		new_addr = (u64)(unsigned long)p->ainsn.insn;
		new_disp = ((addr + (disp * 2)) - new_addr) / 2;
		*(s32 *)&insn[1] = new_disp;
	}
	s390_kernel_write(p->ainsn.insn, &insn, len);
}
NOKPROBE_SYMBOL(copy_instruction);
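
/*
 * Worked example for the displacement fixup in copy_instruction()
 * above (illustrative addresses, not taken from a real system):
 * a 6-byte "brasl %r14,<target>" (RIL-b) at address 0x1000 with
 * RI2 = 0x800 branches to 0x1000 + 2 * 0x800 = 0x2000. If the copy
 * is placed in an insn slot at 0x3000, the rewritten displacement
 * is (0x2000 - 0x3000) / 2 = -0x800, so the copy still reaches
 * 0x2000 when it is single stepped out of line.
 */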

static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32-bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_s390_insn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_s390_insn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
	/* s390 instructions are halfword aligned */
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
	struct swap_insn_args *args = data;
	struct kprobe *p = args->p;
	u16 opc;

	opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	s390_kernel_write(p->addr, &opc, sizeof(opc));
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);
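
/*
 * Note on the PER setup above (descriptive; follows from the code):
 * with start == end == ip and PER_EVENT_IFETCH selected, the CPU
 * raises a PER event only for the one instruction at ip, i.e. the
 * copy in the insn slot. Executing that single instruction therefore
 * traps straight back into the kprobes code (post_kprobe_handler()
 * below, via DIE_SSTEP) without patching any single-step state into
 * the probed code itself. I/O and external interrupts are masked in
 * the meantime so nothing can preempt the out-of-line step.
 */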

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		pr_err("Invalid kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	/* The breakpoint instruction is 2 bytes; the PSW points past it */
	p = get_kprobe((void *)(regs->psw.addr - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for changing execution path, so get out doing
			 * nothing more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs)) {
				pop_kprobe(kcb);
				preempt_enable_no_resched();
				return 1;
			}
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);
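
/*
 * Example of the reentrancy handling above (illustrative sequence):
 * probe A fires and its pre_handler calls a function that itself
 * carries probe B. B's breakpoint traps while A is still current, so
 * kprobe_handler() takes the kprobe_running() branch: B is pushed
 * with status KPROBE_REENTER, its instruction copy is single stepped
 * without calling B's handlers, and pop_kprobe() later makes A
 * current again. Only this one level of nesting is supported; see
 * the comment at push_kprobe().
 */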

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	regs->psw.addr = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);

		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;

		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
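
/*
 * Worked example for FIXUP_RETURN_REGISTER above (illustrative
 * addresses): a "bras %r14,<target>" copied from 0x1000 to an insn
 * slot at 0x3000 stores the return address relative to the slot, so
 * after the out-of-line step %r14 holds 0x3004 instead of 0x1004.
 * Adding p->addr - p->ainsn.insn (0x1000 - 0x3000 = -0x2000) to the
 * register rewrites it to the 0x1004 the probed code expects.
 */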

static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single stepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw address back to the probe
		 * address and allow the page fault handler to continue
		 * as a normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = s390_search_extables(regs->psw.addr);
		if (entry && ex_handle(entry, regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
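
/*
 * Usage sketch (illustrative, not part of this file): the arch hooks
 * above are driven by the generic kprobes core. A module would do
 * roughly:
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "kernel_clone",	// hypothetical target
 *		.pre_handler = my_pre_handler,	// hypothetical callback
 *	};
 *	register_kprobe(&kp);
 *
 * register_kprobe() ends up calling arch_prepare_kprobe() to vet and
 * copy the instruction, and arch_arm_kprobe() to plant the 2-byte
 * breakpoint via stop_machine().
 */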