/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
{
	switch (insn[0] >> 8) {
	case 0x0c:	/* bassm */
	case 0x0b:	/* bsm	 */
	case 0x83:	/* diag  */
	case 0x44:	/* ex	 */
	case 0xac:	/* stnsm */
	case 0xad:	/* stosm */
		return -EINVAL;
	}
	switch (insn[0]) {
	case 0x0101:	/* pr	 */
	case 0xb25a:	/* bsa	 */
	case 0xb240:	/* bakr  */
	case 0xb258:	/* bsg	 */
	case 0xb218:	/* pc	 */
	case 0xb228:	/* pt	 */
	case 0xb98d:	/* epsw	 */
		return -EINVAL;
	}
	return 0;
}

static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
{
	/* default fixup method */
	int fixup = FIXUP_PSW_NORMAL;

	switch (insn[0] >> 8) {
	case 0x05:	/* balr	*/
	case 0x0d:	/* basr */
		fixup = FIXUP_RETURN_REGISTER;
		/* if r2 = 0, no branch will be taken */
		if ((insn[0] & 0x0f) == 0)
			fixup |= FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x06:	/* bctr	*/
	case 0x07:	/* bcr	*/
		fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x45:	/* bal	*/
	case 0x4d:	/* bas	*/
		fixup = FIXUP_RETURN_REGISTER;
		break;
	case 0x47:	/* bc	*/
	case 0x46:	/* bct	*/
	case 0x86:	/* bxh	*/
	case 0x87:	/* bxle	*/
		fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x82:	/* lpsw	*/
		fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xb2:	/* lpswe */
		if ((insn[0] & 0xff) == 0xb2)
			fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xa7:	/* bras	*/
		if ((insn[0] & 0x0f) == 0x05)
			fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xc0:
		if ((insn[0] & 0x0f) == 0x00 ||	/* larl  */
		    (insn[0] & 0x0f) == 0x05)	/* brasl */
			fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xeb:
		switch (insn[2] & 0xff) {
		case 0x44: /* bxhg  */
		case 0x45: /* bxleg */
			fixup = FIXUP_BRANCH_NOT_TAKEN;
			break;
		}
		break;
	case 0xe3:	/* bctg	*/
		if ((insn[2] & 0xff) == 0x46)
			fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0xec:
		switch (insn[2] & 0xff) {
		case 0xe5: /* clgrb */
		case 0xe6: /* cgrb  */
		case 0xf6: /* crb   */
		case 0xf7: /* clrb  */
		case 0xfc: /* cgib  */
		case 0xfd: /* cglib */
		case 0xfe: /* cib   */
		case 0xff: /* clib  */
			fixup = FIXUP_BRANCH_NOT_TAKEN;
			break;
		}
		break;
	}
	return fixup;
}
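/*
 * Editor's note (illustrative, not from the original source): on s390 the
 * length of an instruction is encoded in the two most significant bits of
 * its first halfword: 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes.
 * arch_prepare_kprobe() below relies on this when it copies the probed
 * instruction with ((p->opcode >> 14) + 3) & -2. Worked example for brasl
 * (first halfword 0xc005): 0xc005 >> 14 == 3, and (3 + 3) & -2 == 6, so
 * six bytes are copied.
 */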
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;

	/* Make sure the probe isn't going on a difficult instruction */
	if (is_prohibited_opcode(p->addr))
		return -EINVAL;

	p->opcode = *p->addr;
	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);

	return 0;
}

struct ins_replace_args {
	kprobe_opcode_t *ptr;
	kprobe_opcode_t opcode;
};

static int __kprobes swap_instruction(void *aref)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args *args = aref;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
	kcb->kprobe_status = status;
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.opcode = BREAKPOINT_INSTRUCTION;
	stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.opcode = p->opcode;
	stop_machine(swap_instruction, &args, NULL);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}

static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
					struct pt_regs *regs,
					unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}

static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
					 struct pt_regs *regs,
					 unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__get_cpu_var(current_kprobe) = p;
}

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
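/*
 * Editor's note (a sketch under the s390 ELF ABI, not from the original
 * source): the caller leaves its return address in %r14, e.g. a call is
 * compiled as "brasl %r14,callee" and the callee returns via "br %r14".
 * This is why arch_prepare_kretprobe() below can divert the return path
 * simply by saving gprs[14] and pointing it at kretprobe_trampoline.
 */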
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}

static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
					   struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
	}
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe save and restore
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs))
				return 1;
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} else if (kprobe_running()) {
		p = __get_cpu_var(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			/*
			 * Continuation after the jprobe completed and
			 * caused the jprobe_return trap. The jprobe
			 * break_handler "returns" to the original
			 * function that still has the kprobe breakpoint
			 * installed. We continue with single stepping.
			 */
			kcb->kprobe_status = KPROBE_HIT_SS;
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			return 1;
		} /* else:
		   * No kprobe at this address and the current kprobe
		   * has no break handler (no jprobe!). The kernel just
		   * exploded, let the standard trap handler pick up the
		   * pieces.
		   */
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
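/*
 * Editor's example (a minimal sketch against the generic kprobes API; the
 * symbol name and handler body are hypothetical, not part of this file):
 * a probe that ends up being serviced by kprobe_handler() above.
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %p\n", p->addr);
 *		return 0;	// 0: continue with single-stepping
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",	// hypothetical target
 *		.pre_handler	= pre,
 *	};
 *
 *	// in module init / exit:
 *	register_kprobe(&kp);
 *	unregister_kprobe(&kp);
 */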
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the first instance's ret_addr will point to the
	 *	 real return address, and all the rest will point to
	 *	 kretprobe_trampoline
	 */
	ri = NULL;
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
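/*
 * Editor's example (a minimal sketch, hypothetical names): a return probe
 * whose handler is dispatched through trampoline_probe_handler() above.
 * On s390 the function return value is in %r2, i.e. regs->gprs[2].
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("returned %lx\n", regs->gprs[2]);
 *		return 0;
 *	}
 *
 *	static struct kretprobe rp = {
 *		.kp.symbol_name	= "do_fork",	// hypothetical target
 *		.handler	= ret_handler,
 *		.maxactive	= 16,
 *	};
 *
 *	register_kretprobe(&rp);
 */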
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
	int fixup = get_fixup_type(p->ainsn.insn);

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
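/*
 * Editor's note (worked example, not from the original source): assume a
 * 4-byte conditional branch "bc" was probed. get_fixup_type() reports
 * FIXUP_BRANCH_NOT_TAKEN. If the branch was not taken, the PSW now points
 * just behind the copied instruction, so ip - ainsn.insn == ilen == 4 and
 * resume_execution() above rewrites ip to p->addr + 4. If the branch was
 * taken, the PSW already holds the branch target and is left untouched.
 */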
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, the
	 * psw mask will have PER set, in which case we continue the
	 * remaining processing of do_single_step, as if this is not a
	 * probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}

static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw address back to the probe
		 * address and allow the page fault handler to
		 * continue as a normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile(".word 0x0002");
}

static void __used __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
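/*
 * Editor's example (a minimal sketch, hypothetical names): a jprobe served
 * by setjmp_pre_handler()/longjmp_break_handler() above. The entry function
 * must mirror the probed function's signature and must end with
 * jprobe_return(), which raises the trap that longjmp_break_handler()
 * catches to restore the saved registers and stack.
 *
 *	static long jdo_fork(unsigned long clone_flags, ...)
 *	{				// signature must match the target
 *		pr_info("clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// never returns normally
 *		return 0;
 *	}
 *
 *	static struct jprobe jp = {
 *		.entry		= jdo_fork,
 *		.kp.symbol_name	= "do_fork",	// hypothetical target
 *	};
 *
 *	register_jprobe(&jp);
 */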