/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);

static void *alloc_dmainsn_page(void)
{
	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
}

static void free_dmainsn_page(void *page)
{
	free_page((unsigned long)page);
}

struct kprobe_insn_cache kprobe_dmainsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
	.alloc = alloc_dmainsn_page,
	.free = free_dmainsn_page,
	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};

static void copy_instruction(struct kprobe *p)
{
	unsigned long ip = (unsigned long) p->addr;
	s64 disp, new_disp;
	u64 addr, new_addr;

	if (ftrace_location(ip) == ip) {
		/*
		 * If kprobes patches the instruction that is morphed by
		 * ftrace, make sure that kprobes always sees the branch
		 * "jg .+24" that skips the mcount block.
		 */
		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
		p->ainsn.is_ftrace_insn = 1;
	} else
		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
	p->opcode = p->ainsn.insn[0];
	if (!probe_is_insn_relative_long(p->ainsn.insn))
		return;
	/*
	 * For pc-relative instructions in RIL-b or RIL-c format patch the
	 * RI2 displacement field. We have already made sure that the insn
	 * slot for the patched instruction is within the same 2GB area
	 * as the original instruction (either kernel image or module area).
	 * Therefore the new displacement will always fit.
	 */
	disp = *(s32 *)&p->ainsn.insn[1];
	addr = (u64)(unsigned long)p->addr;
	new_addr = (u64)(unsigned long)p->ainsn.insn;
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);
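
/*
 * Worked example for the displacement rewrite above (hypothetical
 * addresses, not taken from a real system): assume a 6-byte RIL-b
 * instruction at p->addr == 0x10000 with disp == 0x2000, i.e. a
 * halfword-scaled branch target of 0x10000 + 0x2000 * 2 == 0x14000.
 * If the insn slot lives at 0x30000, the new displacement becomes
 * (0x14000 - 0x30000) / 2 == -0xe000, so executing the copy computes
 * 0x30000 + (-0xe000) * 2 == 0x14000 - the original target.
 */
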
static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_dmainsn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_dmainsn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't going on a difficult instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

int arch_check_ftrace_location(struct kprobe *p)
{
	return 0;
}

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct swap_insn_args *args = data;
	struct ftrace_insn new_insn, *insn;
	struct kprobe *p = args->p;
	size_t len;

	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	len = sizeof(new_insn.opc);
	if (!p->ainsn.is_ftrace_insn)
		goto skip_ftrace;
	len = sizeof(new_insn);
	insn = (struct ftrace_insn *) p->addr;
	if (args->arm_kprobe) {
		if (is_ftrace_nop(insn))
			new_insn.disp = KPROBE_ON_FTRACE_NOP;
		else
			new_insn.disp = KPROBE_ON_FTRACE_CALL;
	} else {
		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
		if (insn->disp == KPROBE_ON_FTRACE_NOP)
			ftrace_generate_nop_insn(&new_insn);
	}
skip_ftrace:
	kcb->kprobe_status = KPROBE_SWAP_INST;
	probe_kernel_write(p->addr, &new_insn, len);
	kcb->kprobe_status = status;
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
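
/*
 * Hardware single-step on s390 is implemented with PER (Program Event
 * Recording): control registers %cr9-%cr11 are programmed so that an
 * instruction-fetch event fires for the address range [start, end].
 * enable_singlestep() below sets start == end == the insn slot address,
 * so exactly one instruction - the out-of-line copy - executes before
 * the PER event delivers control back to the kprobes code. I/O and
 * external interrupts are masked off meanwhile so nothing else can run
 * in between.
 */
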
static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);
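
/*
 * Entry point for the s390 kprobes breakpoint. Arming a probe replaces
 * the first two bytes of the original instruction with the two-byte
 * breakpoint opcode 0x0002 (BREAKPOINT_INSTRUCTION, also emitted by
 * jprobe_return() below). When it is executed the illegal-op trap fires
 * with the psw address pointing past the 2-byte breakpoint, which is
 * why the handler subtracts 2 to look up the kprobe registered for
 * that address.
 */
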
static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs))
				return 1;
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			/*
			 * Continuation after the jprobe completed and
			 * caused the jprobe_return trap. The jprobe
			 * break_handler "returns" to the original
			 * function that still has the kprobe breakpoint
			 * installed. We continue with single stepping.
			 */
			kcb->kprobe_status = KPROBE_HIT_SS;
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			return 1;
		} /* else:
		   * No kprobe at this address and the current kprobe
		   * has no break handler (no jprobe!). The kernel just
		   * exploded, let the standard trap handler pick up the
		   * pieces.
		   */
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- arch_init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}
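
/*
 * How the pieces fit together: arch_prepare_kretprobe() above stashed
 * the real return address from %r14 in the kretprobe_instance and made
 * the probed function "return" to kretprobe_trampoline instead. The
 * bcr 0,0 there is itself covered by a kprobe (registered in
 * arch_init_kprobes()), so hitting it funnels into the handler below,
 * which runs the user handlers and then redirects the psw to the real
 * return address.
 */
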
/*
 * Called when the probe at kretprobe trampoline is hit.
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	ri = NULL;
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
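
/*
 * After the out-of-line single step the saved psw still refers to the
 * insn slot, so the state has to be fixed up before execution resumes.
 * probe_get_fixup_type() classifies the stepped instruction:
 *	- FIXUP_PSW_NORMAL: shift the psw address back by the delta
 *	  between the insn slot and the original instruction
 *	- FIXUP_BRANCH_NOT_TAKEN: if a branch fell through in the slot,
 *	  continue after the original instruction instead
 *	- FIXUP_RETURN_REGISTER: a branch-and-save (e.g. bras/brasl)
 *	  stored a slot-relative return address in its link register,
 *	  which gets the same delta applied
 */
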
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	/* Check if the kprobes location is an enabled ftrace caller */
	if (p->ainsn.is_ftrace_insn) {
		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
		struct ftrace_insn call_insn;

		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
		/*
		 * A kprobe on an enabled ftrace call site actually single
		 * stepped an unconditional branch (ftrace nop equivalent).
		 * Now we need to fixup things and pretend that a brasl r0,...
		 * was executed instead.
		 */
		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
		}
	}

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);

		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;

		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);

static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, the
	 * psw mask will have PER set, in which case we continue the
	 * remaining processing of do_single_step, as if this is not a
	 * probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);
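
/*
 * Exceptions can be raised at several points while a probe is active:
 * during the instruction swap, while single-stepping the insn slot, or
 * inside a user supplied pre/post handler. kprobe_trap_handler() below
 * sorts these apart via kcb->kprobe_status: a fault during single-step
 * aborts the probe and replays the fault on the original address, while
 * a fault in a handler is first offered to the probe's fault_handler
 * and then to the exception tables.
 */
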
static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, set the psw address back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space, e.g. by
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it; let
		 * do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
NOKPROBE_SYMBOL(jprobe_return);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
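
/*
 * Usage sketch (illustrative only, not part of this file): the arch
 * hooks above are driven by the generic kprobes API. A module that
 * wants to trace a function does something like:
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit at %p\n", p->addr);
 *		return 0;	(0 == continue with single step)
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",	(hypothetical target)
 *		.pre_handler	= my_pre,
 *	};
 *
 * register_kprobe(&kp) then ends up in arch_prepare_kprobe() and
 * arch_arm_kprobe() above; unregister_kprobe(&kp) tears it down via
 * arch_disarm_kprobe() and arch_remove_kprobe().
 */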