/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function-return probes functionality.
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

#include "common.h"

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
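/*
 * Example of how the table below is consulted: row 0x40 is all ones,
 * so for the two-byte instruction 0x0f 0x47 (cmova) can_boost() ends
 * up testing bit 0x47 of the bitmap and reports it boostable:
 *
 *	test_bit(0x47, (unsigned long *)twobyte_is_boostable);
 */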
/*
 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 * This is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long*) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)from;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void __kprobes synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}

/* Insert a call instruction at address 'from', which calls address 'to'. */
void __kprobes synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
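/*
 * For instance, synthesize_reljump(from, to) writes the 5-byte near
 * jump "e9 <rel32>" at 'from' (RELATIVEJUMP_OPCODE being the 0xe9 jmp),
 * with rel32 = to - (from + 5): the displacement is relative to the end
 * of the synthesized instruction, as computed in
 * __synthesize_relative_insn() above.
 */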
/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;

	kp = get_kprobe((void *)addr);
	/* There is no probe, return original address */
	if (!kp)
		return addr;

	/*
	 * Normally, kp->ainsn.insn holds a copy of the original
	 * instruction. However, a RIP-relative instruction cannot be
	 * single-stepped at a different place, so __copy_instruction()
	 * tweaks the displacement of that instruction. In that case, we
	 * can't recover the instruction from kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode has a copy of the first byte of
	 * the probed instruction, which was overwritten by int3. Since
	 * the instruction at kp->addr is not modified by kprobes except
	 * for the first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return (unsigned long)buf;
}
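/*
 * E.g. for an armed 5-byte instruction, the live bytes at kp->addr are
 * "cc <4 original tail bytes>"; the copy placed in 'buf' becomes
 * "<kp->opcode> <4 original tail bytes>", i.e. the original instruction
 * again, because kprobes rewrote only the first byte.
 */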
/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold the kprobe_mutex, or disable preemption, to
 * prevent the kprobes it references from being released.
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative-jump. Since the relative-jump itself is
		 * normally used, we just go through if there is no kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		kernel_insn_init(&insn, (void *)__addr);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}

/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:	/* cli */
	case 0xfb:	/* sti */
	case 0xcf:	/* iret/iretd */
	case 0x9d:	/* popf/popfd */
		return 1;
	}

	return 0;
}
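/*
 * Example: for a probe on "cli", is_IF_modifier() returns non-zero, so
 * set_current_kprobe() below leaves X86_EFLAGS_IF out of the saved
 * flags and post_kprobe_handler() will not restore IF over the effect
 * of the single-stepped instruction itself.
 */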
/*
 * Copy an instruction and, if it uses the %rip-relative addressing
 * mode, adjust the displacement (64-bit x86 only).
 * Returns the length of the copied instruction, or 0 if the original
 * instruction could not be recovered.
 */
int __kprobes __copy_instruction(u8 *dest, u8 *src)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
	insn_get_length(&insn);
	/* Another subsystem put a breakpoint there; we failed to recover it */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
	memcpy(dest, insn.kaddr, insn.length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
		BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn.length;
}

static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	/* Copy the instruction, recovering it if another kprobe modified it. */
	__copy_instruction(p->ainsn.insn, p->addr);

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but that doesn't affect the boostability check.
	 */
	if (can_boost(p->ainsn.insn))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	/* Check whether the instruction modifies the Interrupt Flag or not */
	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

	/* Also, a displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on a special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

void __kprobes
arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
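/*
 * After arch_prepare_kretprobe(), the stack slot that held the caller's
 * return address holds &kretprobe_trampoline instead, so the probed
 * function "returns" into the trampoline below, which hands control to
 * trampoline_handler() to look up the real return address again.
 */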
static void __kprobes
setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
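/*
 * Note on the boost path above: a boosted probe (ainsn.boostable == 1,
 * no post_handler, !CONFIG_PREEMPT) jumps straight into the copied
 * instruction, which resume_execution() has already suffixed with a
 * reljump back to the original instruction stream, so no second trap
 * is taken.
 */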
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int __kprobes
reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
static void __used __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			RESTORE_REGS_STRING
			"	popfq\n"
#else
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			RESTORE_REGS_STRING
			"	popf\n"
#endif
			"	ret\n");
}
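/*
 * In the 64-bit sequence above, trampoline_handler() returns the real
 * return address in %rax; "movq %rax, 152(%rsp)" stores it over the sp
 * slot saved by the initial "pushq %rsp", so after RESTORE_REGS_STRING
 * and popfq it sits at the top of the stack and the final "ret" resumes
 * the original caller.
 */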
/*
 * Called from kretprobe_trampoline
 */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
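/*
 * Example of the ordering guarantee used above: if probed f() calls
 * probed g(), g()'s kretprobe_instance is pushed onto the head of the
 * hash list after f()'s, so the first instance found for 'current' is
 * the innermost live call, and the loops unwind the returns in the
 * right order.
 */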
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes
resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- fix return addr, like the indirect case */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			synthesize_reljump((void *)regs->ip,
				(void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
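/*
 * Worked example for the "call relative" fixup above: the copied call
 * pushed the address just past the copy (copy_ip + insn length) as the
 * return address; "*tos = orig_ip + (*tos - copy_ip)" rewrites it to
 * the address just past the original call site, so the callee returns
 * into the unmodified instruction stream.
 */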
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the ip back to the probe address,
		 * and allow the page fault handler to continue as for
		 * a normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user(), etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes
kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs)) {
			/*
			 * Reset the BS bit in dr6 (pointed to by args->err)
			 * to denote completion of processing.
			 */
			(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
			ret = NOTIFY_STOP;
		}
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
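/*
 * Flow example: the int3 of an armed probe reaches the notifier above
 * as DIE_INT3 and is handled by kprobe_handler(); the debug trap raised
 * by single-stepping the copy arrives as DIE_DEBUG, where clearing
 * DR_STEP in the dr6 image passed via args->err tells do_debug() that
 * the step was consumed here.
 */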
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"	xchg %%rbx,%%rsp	\n"
#else
			"	xchgl %%ebx,%%esp	\n"
#endif
			"	int3			\n"
			"	.globl jprobe_return_end\n"
			"	jprobe_return_end:	\n"
			"	nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

int __init arch_init_kprobes(void)
{
	return arch_init_optprobes();
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}