// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/ibt.h>

#include "common.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, opcode extension
	 * groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
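/*
 * For illustration only (a sketch, not part of this file's logic): the
 * lookup done by can_boost() below is a plain bitmap test,
 *
 *	test_bit(op2, (unsigned long *)twobyte_is_boostable);
 *
 * e.g. SETO (0f 90) maps to word 0x90 / 32 = 4, bit 0x90 % 32 = 16,
 * which the W(0x90, ...) row sets, so it is boostable, while
 * SYSCALL (0f 05) maps to word 0, bit 5, which W(0x00, ...) leaves
 * clear, so it is not.
 */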
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", },	/* This function switches only the current task,
				   but doesn't switch the kernel stack. */
	{NULL, NULL}		/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Insert a call instruction at address 'from', which calls address 'to'. */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
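/*
 * Worked example (illustrative addresses only): synthesizing a reljump
 * that will execute at from = 0xffffffff81000000 and land on
 * to = 0xffffffff81000040 stores raddr = to - (from + 5) = 0x3b, so the
 * five bytes written at 'dest' are e9 3b 00 00 00. The +5 accounts for
 * %rip pointing past the jmp/call instruction when the displacement is
 * applied.
 */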
/*
 * Returns non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;
	insn_byte_t prefix;
	int i;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	for_each_insn_prefix(insn, i, prefix) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(prefix);
		/* Can't boost address-size override and CS override prefixes */
		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
			return 0;
	}

	opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0x62:		/* bound */
	case 0x70 ... 0x7f:	/* Conditional jumps */
	case 0x9a:		/* Call far */
	case 0xc0 ... 0xc1:	/* Grp2 */
	case 0xcc ... 0xce:	/* software exceptions */
	case 0xd0 ... 0xd3:	/* Grp2 */
	case 0xd6:		/* (UD) */
	case 0xd8 ... 0xdf:	/* ESC */
	case 0xe0 ... 0xe3:	/* LOOP*, JCXZ */
	case 0xe8 ... 0xe9:	/* near Call, JMP */
	case 0xeb:		/* Short JMP */
	case 0xf0 ... 0xf4:	/* LOCK/REP, HLT */
	case 0xf6 ... 0xf7:	/* Grp3 */
	case 0xfe:		/* Grp4 */
		/* ... are not boostable */
		return 0;
	case 0xff:		/* Grp5 */
		/* Only indirect jmp is boostable */
		return X86_MODRM_REG(insn->modrm.bytes[0]) == 4;
	default:
		return 1;
	}
}
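/*
 * Illustrative example of the Grp5 case above: for 'jmp *%rax' (ff e0)
 * the ModRM reg field is 4, so can_boost() returns 1; for 'call *%rax'
 * (ff d0) the reg field is 2, so the call is not boosted and falls back
 * to int3 single-stepping.
 */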
static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	bool faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr) == addr;
	/*
	 * Use the current code if it is not modified by Kprobes
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * On the other hand, in case of a normal Kprobe, kp->opcode has a
	 * copy of the first byte of the probed instruction, which is
	 * overwritten by int3. And since the instruction at kp->addr is not
	 * modified by kprobes except for the first byte, we can recover the
	 * original instruction from it and kp->opcode.
	 *
	 * In case of Kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent state.
	 * Fortunately, we know that the original code is the ideal 5-byte
	 * long NOP.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, x86_nops[5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * Caller must lock kprobes by kprobe_mutex, or disable preemption,
 * to prevent the kprobes it references from being released.
 * Returns zero if the instruction cannot be recovered (or access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}
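/*
 * Worked example (hypothetical bytes): if 'mov %rsp,%rbp' (48 89 e5) at
 * addr carries a normal kprobe, the live kernel text reads cc 89 e5 and
 * kp->opcode holds 0x48; __recover_probed_insn() copies the live bytes
 * into buf and rewrites buf[0] = 0x48, handing the decoder the original
 * instruction.
 */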
/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		int ret;

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative-jump. Since the relative-jump itself is
		 * normally used, we just go through if there is no kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;

		ret = insn_decode_kernel(&insn, (void *)__addr);
		if (ret < 0)
			return 0;

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}

/* If x86 supports IBT (ENDBR) it must be skipped. */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
					 bool *on_func_entry)
{
	if (is_endbr(*(u32 *)addr)) {
		*on_func_entry = !offset || offset == 4;
		if (*on_func_entry)
			offset = 4;
	} else {
		*on_func_entry = !offset;
	}

	return (kprobe_opcode_t *)(addr + offset);
}
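/*
 * Example (assuming IBT is enabled): a function that starts with the
 * 4-byte 'endbr64' (f3 0f 1e fa) probed at offset 0 gets its probe
 * placed at addr + 4 with *on_func_entry = true; any other offset is
 * used as-is and is not treated as function entry.
 */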
/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted by @real, not @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
	int ret;

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if given address is not recovered */
	if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
			MAX_INSN_SIZE))
		return 0;

	ret = insn_decode_kernel(insn, dest);
	if (ret < 0)
		return 0;

	/* We cannot probe instructions that have a forced-emulation prefix */
	if (insn_has_emulate_prefix(insn))
		return 0;

	/* Another subsystem put a breakpoint there; we failed to recover it */
	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
		return 0;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
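/*
 * Worked example of the fixup above (made-up addresses): for
 * 'mov 0x1000(%rip),%rax' with src = 0xffffffff81000000 and the copy
 * run from real = 0xffffffffa0000000, the target address must stay
 * src + insn_len + 0x1000, so the new displacement becomes
 * src + 0x1000 - real; the insn-length terms cancel because both
 * copies decode to the same length.
 */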
/* Prepare reljump or int3 right after instruction */
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
{
	int len = insn->length;

	if (!IS_ENABLED(CONFIG_PREEMPTION) &&
	    !p->post_handler && can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
		/*
		 * These instructions can be executed directly if they
		 * jump back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += JMP32_INSN_SIZE;
		p->ainsn.boostable = 1;
	} else {
		/* Otherwise, put an int3 for trapping singlestep */
		if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
			return -ENOSPC;

		buf[len] = INT3_INSN_OPCODE;
		len += INT3_INSN_SIZE;
	}

	return len;
}

/* Make the page read-only when allocating it */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	/*
	 * First make the page read-only, and only then make it executable to
	 * prevent it from being W+X in between.
	 */
	set_memory_ro((unsigned long)page, 1);

	/*
	 * TODO: Once additional kernel code protection mechanisms are set, ensure
	 * that the page was not maliciously altered and it is still zeroed.
	 */
	set_memory_x((unsigned long)page, 1);

	return page;
}
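/*
 * Resulting slot layouts, for illustration. Boosted (e.g. a 3-byte
 * insn): [ orig bytes ][ e9 rel32 back to p->addr + 3 ], so execution
 * returns to the kernel text without a second trap. Non-boosted:
 * [ orig bytes ][ cc ], where the trailing int3 re-enters
 * kprobe_int3_handler() to finish the single step.
 */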
/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */

static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
	switch (p->ainsn.opcode) {
	case 0xfa:	/* cli */
		regs->flags &= ~(X86_EFLAGS_IF);
		break;
	case 0xfb:	/* sti */
		regs->flags |= X86_EFLAGS_IF;
		break;
	case 0x9c:	/* pushf */
		int3_emulate_push(regs, regs->flags);
		break;
	case 0x9d:	/* popf */
		regs->flags = int3_emulate_pop(regs);
		break;
	}
	regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);

static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{
	int3_emulate_ret(regs);
}
NOKPROBE_SYMBOL(kprobe_emulate_ret);

static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	func += p->ainsn.rel32;
	int3_emulate_call(regs, func);
}
NOKPROBE_SYMBOL(kprobe_emulate_call);

static nokprobe_inline
void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	if (cond)
		ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}

static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
	__kprobe_emulate_jmp(p, regs, true);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);

static const unsigned long jcc_mask[6] = {
	[0] = X86_EFLAGS_OF,
	[1] = X86_EFLAGS_CF,
	[2] = X86_EFLAGS_ZF,
	[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
	[4] = X86_EFLAGS_SF,
	[5] = X86_EFLAGS_PF,
};

static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
	bool invert = p->ainsn.jcc.type & 1;
	bool match;

	if (p->ainsn.jcc.type < 0xc) {
		match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
	} else {
		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
		if (p->ainsn.jcc.type >= 0xe)
			match = match || (regs->flags & X86_EFLAGS_ZF);
	}
	__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);
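/*
 * Worked example for the Jcc decoding above: JE (opcode 0x74) stores
 * jcc.type = 4, so invert = 0 and jcc_mask[4 >> 1] = X86_EFLAGS_ZF:
 * the branch is taken when ZF is set. JNE (0x75) is the same test with
 * invert = 1. Types 0xc..0xf (JL/JGE/JLE/JG) use the SF^OF computation
 * instead.
 */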
static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
	bool match;

	if (p->ainsn.loop.type != 3) {	/* LOOP* */
		if (p->ainsn.loop.asize == 32)
			match = ((*(u32 *)&regs->cx)--) != 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = ((*(u64 *)&regs->cx)--) != 0;
#endif
		else
			match = ((*(u16 *)&regs->cx)--) != 0;
	} else {			/* JCXZ */
		if (p->ainsn.loop.asize == 32)
			match = *(u32 *)(&regs->cx) == 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = *(u64 *)(&regs->cx) == 0;
#endif
		else
			match = *(u16 *)(&regs->cx) == 0;
	}

	if (p->ainsn.loop.type == 0)		/* LOOPNE */
		match = match && !(regs->flags & X86_EFLAGS_ZF);
	else if (p->ainsn.loop.type == 1)	/* LOOPE */
		match = match && (regs->flags & X86_EFLAGS_ZF);

	__kprobe_emulate_jmp(p, regs, match);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);

static const int addrmode_regoffs[] = {
	offsetof(struct pt_regs, ax),
	offsetof(struct pt_regs, cx),
	offsetof(struct pt_regs, dx),
	offsetof(struct pt_regs, bx),
	offsetof(struct pt_regs, sp),
	offsetof(struct pt_regs, bp),
	offsetof(struct pt_regs, si),
	offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
	offsetof(struct pt_regs, r8),
	offsetof(struct pt_regs, r9),
	offsetof(struct pt_regs, r10),
	offsetof(struct pt_regs, r11),
	offsetof(struct pt_regs, r12),
	offsetof(struct pt_regs, r13),
	offsetof(struct pt_regs, r14),
	offsetof(struct pt_regs, r15),
#endif
};

static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_call(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);

static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
	insn_byte_t opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0xfa:	/* cli */
	case 0xfb:	/* sti */
	case 0x9c:	/* pushfl */
	case 0x9d:	/* popf/popfd */
		/*
		 * IF modifiers must be emulated, since they would otherwise
		 * enable interrupts while int3 single-stepping.
		 */
		p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
		p->ainsn.opcode = opcode;
		break;
	case 0xc2:	/* ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
		p->ainsn.emulate_op = kprobe_emulate_ret;
		break;
	case 0x9a:	/* far call absolute -- segment is not supported */
	case 0xea:	/* far jmp absolute -- segment is not supported */
	case 0xcc:	/* int3 */
	case 0xcf:	/* iret -- in-kernel IRET is not supported */
		return -EOPNOTSUPP;
	case 0xe8:	/* near call relative */
		p->ainsn.emulate_op = kprobe_emulate_call;
		if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0xeb:	/* short jump relative */
	case 0xe9:	/* near jump relative */
		p->ainsn.emulate_op = kprobe_emulate_jmp;
		if (insn->immediate.nbytes == 1)
			p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		else if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0x70 ... 0x7f:
		/* 1 byte conditional jump */
		p->ainsn.emulate_op = kprobe_emulate_jcc;
		p->ainsn.jcc.type = opcode & 0xf;
		p->ainsn.rel32 = *(char *)insn->immediate.bytes;
		break;
	case 0x0f:
		opcode = insn->opcode.bytes[1];
		if ((opcode & 0xf0) == 0x80) {
			/* 2 bytes Conditional Jump */
			p->ainsn.emulate_op = kprobe_emulate_jcc;
			p->ainsn.jcc.type = opcode & 0xf;
			if (insn->immediate.nbytes == 2)
				p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
			else
				p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		} else if (opcode == 0x01 &&
			   X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
			   X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
			/* VM extensions - not supported */
			return -EOPNOTSUPP;
		}
		break;
	case 0xe0:	/* Loop NZ */
	case 0xe1:	/* Loop */
	case 0xe2:	/* Loop */
	case 0xe3:	/* J*CXZ */
		p->ainsn.emulate_op = kprobe_emulate_loop;
		p->ainsn.loop.type = opcode & 0x3;
		p->ainsn.loop.asize = insn->addr_bytes * 8;
		p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		break;
	case 0xff:
		/*
		 * Since 0xff is an extended group opcode, the instruction
		 * is determined by the ModRM byte.
		 */
		opcode = insn->modrm.bytes[0];
		if ((opcode & 0x30) == 0x10) {
			if ((opcode & 0x8) == 0x8)
				return -EOPNOTSUPP;	/* far call */
			/* call absolute, indirect */
			p->ainsn.emulate_op = kprobe_emulate_call_indirect;
		} else if ((opcode & 0x30) == 0x20) {
			if ((opcode & 0x8) == 0x8)
				return -EOPNOTSUPP;	/* far jmp */
			/* jmp near absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
		} else
			break;

		if (insn->addr_bytes != sizeof(unsigned long))
			return -EOPNOTSUPP;	/* Don't support different size */
		if (X86_MODRM_MOD(opcode) != 3)
			return -EOPNOTSUPP;	/* TODO: support memory addressing */

		p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
#ifdef CONFIG_X86_64
		if (X86_REX_B(insn->rex_prefix.value))
			p->ainsn.indirect.reg += 8;
#endif
		break;
	default:
		break;
	}
	p->ainsn.size = insn->length;

	return 0;
}
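/*
 * Example of the 0xff decoding above (illustrative): for 'call *%r8'
 * (41 ff d0) the ModRM byte is 0xd0, so (0xd0 & 0x30) == 0x10 selects
 * the indirect-call emulator, X86_MODRM_RM() yields 0, and the REX.B
 * prefix bumps indirect.reg to 8, indexing r8 in addrmode_regoffs[].
 */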
static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int ret, len;

	/* Copy the instruction, recovering it if another optprobe modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/* Analyze the opcode and setup emulate functions */
	ret = prepare_emulation(p, &insn);
	if (ret < 0)
		return ret;

	/* Add int3 for single-step or booster jmp */
	len = prepare_singlestep(buf, p, &insn);
	if (len < 0)
		return len;

	/* Also, displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	p->ainsn.tp_len = len;
	perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);

	/* OK, write back the instruction(s) into ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;

	memset(&p->ainsn, 0, sizeof(p->ainsn));

	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}

void arch_arm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	text_poke(p->addr, &int3, 1);
	text_poke_sync();
	perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
	text_poke(p->addr, &p->opcode, 1);
	text_poke_sync();
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* Record the perf event before freeing the slot */
		perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
				     p->ainsn.tp_len, NULL, 0);
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & X86_EFLAGS_IF);
}

static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		/* This will restore both kcb and current_kprobe */
		restore_previous_kprobe(kcb);
	} else {
		/*
		 * Always update the kcb status because
		 * reset_current_kprobe() doesn't update kcb.
		 */
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (cur->post_handler)
			cur->post_handler(cur, regs, 0);
		reset_current_kprobe();
	}
}
NOKPROBE_SYMBOL(kprobe_post_process);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPTION)
	if (p->ainsn.boostable) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe neither resets current_kprobe
		 * nor sets current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	if (p->ainsn.emulate_op) {
		p->ainsn.emulate_op(p, regs);
		kprobe_post_process(p, regs, kcb);
		return;
	}

	/* Disable interrupts and set the ip register on the trampoline */
	regs->flags &= ~X86_EFLAGS_IF;
	regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn. We don't use a trap either, but "int3" again
 * right after the copied instruction.
 * Unlike trap-based single-stepping, "int3" single-stepping cannot
 * handle instructions that change the ip register, e.g. jmp, call, and
 * conditional jmp, nor instructions that change the IF flag, because
 * interrupts must stay disabled around the single-stepping.
 * Such instructions are software-emulated; the others are single-stepped
 * using "int3".
 *
 * When the 2nd "int3" is handled, regs->ip and regs->flags need to
 * be adjusted so that we can resume execution at the correct code.
 */
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;

	/* Restore saved interrupt flag and ip register */
	regs->flags |= kcb->kprobe_saved_flags;
	/* Note that regs->ip points past the executed int3, so step back */
	regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
}
NOKPROBE_SYMBOL(resume_singlestep);
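/*
 * Worked example of the ip fixup above: for a 3-byte probed
 * instruction, the second int3 leaves regs->ip = copy_ip + 3 + 1
 * (one byte for the trapping int3 itself); adding
 * (orig_ip - copy_ip) - 1 yields orig_ip + 3, i.e. the instruction
 * following the probe point in the original text.
 */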
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/*
		 * A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG, or we'll continue in an endless reentering loop
		 * and eventually overflow the stack.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{
	return (kcb->kprobe_status == KPROBE_HIT_SS ||
		kcb->kprobe_status == KPROBE_REENTER);
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing. Since int3 and the debug trap disable irqs and we
	 * clear IF while single-stepping, this must not be preemptible.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, that means
			 * the user handler set up registers to exit to another
			 * instruction, so we must skip the single stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
			return 1;
		}
	} else if (kprobe_is_ss(kcb)) {
		p = kprobe_running();
		if ((unsigned long)p->ainsn.insn < regs->ip &&
		    (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
			/* Most probably this is the second int3 for singlestep */
			resume_singlestep(p, regs, kcb);
			kprobe_post_process(p, regs, kcb);
			return 1;
		}
	}

	if (*addr != INT3_INSN_OPCODE) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address. In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	} /* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault. We reset the current
		 * kprobe and point the ip back to the probe address,
		 * then allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;

		/*
		 * If the IF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}