// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality.
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/objtool.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/set_memory.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/ibt.h>

#include "common.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jumps, opcode extension
 * groups, and some special opcodes cannot be boosted.
 * This is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long *) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
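
/*
 * Example of how the table is consumed (illustration only): for the
 * two-byte instruction 0x0f 0x90 (SETO), can_boost() below does
 * test_bit(0x90, ...), i.e. it checks bit 0x90 of this bitmap. The W()
 * rows lay the bits out so that this is row 0x90, column 0, which is 1:
 * SETcc is boostable. Bit 0x80 (JO rel32, row 0x80, column 0) is 0, so
 * the long conditional jumps are not.
 */
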
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function only switches the current task,
			      but does not switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Write a relative jump at @dest which, when executed at @from, jumps to @to. */
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Write a relative call at @dest which, when executed at @from, calls @to. */
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
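
/*
 * Worked example for the encoding above: a reljump written into a
 * buffer at @dest, but intended to execute at from == 0x1000 and land
 * on to == 0x2000, gets raddr == 0x2000 - (0x1000 + 5) == 0xffb,
 * because a rel32 jmp/call is encoded relative to the address of the
 * *next* instruction, i.e. @from plus the 5-byte instruction itself.
 */
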
/*
 * Return non-zero if INSN is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;
	insn_byte_t prefix;
	int i;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	for_each_insn_prefix(insn, i, prefix) {
		insn_attr_t attr;

		attr = inat_get_opcode_attribute(prefix);
		/* Can't boost an address-size override or CS override prefix */
		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
			return 0;
	}

	opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0x62:		/* bound */
	case 0x70 ... 0x7f:	/* Conditional jumps */
	case 0x9a:		/* Call far */
	case 0xc0 ... 0xc1:	/* Grp2 */
	case 0xcc ... 0xce:	/* software exceptions */
	case 0xd0 ... 0xd3:	/* Grp2 */
	case 0xd6:		/* (UD) */
	case 0xd8 ... 0xdf:	/* ESC */
	case 0xe0 ... 0xe3:	/* LOOP*, JCXZ */
	case 0xe8 ... 0xe9:	/* near Call, JMP */
	case 0xeb:		/* Short JMP */
	case 0xf0 ... 0xf4:	/* LOCK/REP, HLT */
	case 0xf6 ... 0xf7:	/* Grp3 */
	case 0xfe:		/* Grp4 */
		/* ... are not boostable */
		return 0;
	case 0xff:		/* Grp5 */
		/* Only indirect jmp is boostable */
		return X86_MODRM_REG(insn->modrm.bytes[0]) == 4;
	default:
		return 1;
	}
}

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	bool faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr) == addr;
	/*
	 * Use the current code if it is not modified by kprobes
	 * and cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped
	 * at a different place, so __copy_instruction() tweaks the
	 * displacement of that instruction. In that case, we can't
	 * recover the original instruction from kp->ainsn.insn.
	 *
	 * On the other hand, in the case of a normal kprobe, kp->opcode
	 * has a copy of the first byte of the probed instruction, which
	 * is overwritten by int3. Since the instruction at kp->addr is
	 * not modified by kprobes except for the first byte, we can
	 * recover the original instruction from it and kp->opcode.
	 *
	 * In the case of kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent
	 * state. Fortunately, we know that the original code is the
	 * ideal 5-byte long NOP.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, x86_nops[5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must lock kprobes by kprobe_mutex, or disable preemption,
 * to prevent the referenced kprobes from being freed.
 * Returns zero if the instruction cannot be recovered (or access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		int ret;

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump. Since the relative jump itself is executed
		 * normally, we just step over it if there is no kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;

		ret = insn_decode_kernel(&insn, (void *)__addr);
		if (ret < 0)
			return 0;

#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
#endif
		addr += insn.length;
	}

	return (addr == paddr);
}

/* If x86 supports IBT (ENDBR), the ENDBR instruction must be skipped. */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
					 bool *on_func_entry)
{
	if (is_endbr(*(u32 *)addr)) {
		*on_func_entry = !offset || offset == 4;
		if (*on_func_entry)
			offset = 4;
	} else {
		*on_func_entry = !offset;
	}

	return (kprobe_opcode_t *)(addr + offset);
}
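
/*
 * Example: with CONFIG_X86_KERNEL_IBT, a function typically begins with
 * a 4-byte ENDBR64 (f3 0f 1e fa). A probe requested at func+0 is moved
 * to func+4 and is still reported as sitting on the function entry;
 * requesting func+4 directly is treated the same way. Without ENDBR,
 * only offset 0 counts as function entry.
 */
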
/*
 * Copy an instruction, recovering it if it has been modified by kprobes,
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted with @real, not
 * @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
	int ret;

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address is not recovered */
	if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
			MAX_INSN_SIZE))
		return 0;

	ret = insn_decode_kernel(insn, dest);
	if (ret < 0)
		return 0;

	/* We cannot probe force-emulate prefixed instructions */
	if (insn_has_emulate_prefix(insn))
		return 0;

	/* Another subsystem put a breakpoint there; we failed to recover it */
	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
		return 0;

	/* We should not single-step exception-masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
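
/*
 * Worked example for the RIP-relative fixup above (illustrative
 * addresses): if a 6-byte instruction at src == 0xffffffff81000000 has
 * disp == 0x100, it references 0xffffffff81000106. When the copy runs
 * at @real, newdisp == src + 0x100 - real, so real + 6 + newdisp
 * resolves to the very same absolute address.
 */
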
/* Prepare a reljump or int3 right after the instruction */
static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
			      struct insn *insn)
{
	int len = insn->length;

	if (!IS_ENABLED(CONFIG_PREEMPTION) &&
	    !p->post_handler && can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
		/*
		 * This instruction can be executed directly if it
		 * jumps back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += JMP32_INSN_SIZE;
		p->ainsn.boostable = 1;
	} else {
		/* Otherwise, put an int3 to trap the single-step */
		if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
			return -ENOSPC;

		buf[len] = INT3_INSN_OPCODE;
		len += INT3_INSN_SIZE;
	}

	return len;
}
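
/*
 * The resulting single-step slot layout is one of (sketch):
 *
 *	boosted:	[ copied insn ][ jmp back to p->addr + insn->length ]
 *	non-boosted:	[ copied insn ][ int3 ]
 *
 * The boosted form jumps straight back into the kernel text without
 * another trap; the int3 form re-enters kprobe_int3_handler(), which
 * fixes up regs->ip in resume_singlestep() below.
 */
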
/* Make the page ROX as soon as it is allocated */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	/*
	 * TODO: Once additional kernel code protection mechanisms are set,
	 * ensure that the page was not maliciously altered and is still
	 * zeroed.
	 */
	set_memory_rox((unsigned long)page, 1);

	return page;
}

/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */

static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
	switch (p->ainsn.opcode) {
	case 0xfa:	/* cli */
		regs->flags &= ~(X86_EFLAGS_IF);
		break;
	case 0xfb:	/* sti */
		regs->flags |= X86_EFLAGS_IF;
		break;
	case 0x9c:	/* pushf */
		int3_emulate_push(regs, regs->flags);
		break;
	case 0x9d:	/* popf */
		regs->flags = int3_emulate_pop(regs);
		break;
	}
	regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
}
NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);

static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
{
	int3_emulate_ret(regs);
}
NOKPROBE_SYMBOL(kprobe_emulate_ret);

static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	func += p->ainsn.rel32;
	int3_emulate_call(regs, func);
}
NOKPROBE_SYMBOL(kprobe_emulate_call);

static nokprobe_inline
void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
{
	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;

	if (cond)
		ip += p->ainsn.rel32;
	int3_emulate_jmp(regs, ip);
}

static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
	__kprobe_emulate_jmp(p, regs, true);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);

static const unsigned long jcc_mask[6] = {
	[0] = X86_EFLAGS_OF,
	[1] = X86_EFLAGS_CF,
	[2] = X86_EFLAGS_ZF,
	[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
	[4] = X86_EFLAGS_SF,
	[5] = X86_EFLAGS_PF,
};

static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
	bool invert = p->ainsn.jcc.type & 1;
	bool match;

	if (p->ainsn.jcc.type < 0xc) {
		match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
	} else {
		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
		if (p->ainsn.jcc.type >= 0xe)
			match = match || (regs->flags & X86_EFLAGS_ZF);
	}
	__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);
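
/*
 * Decoding example: JNE/JNZ (opcode 0x75) yields jcc.type == 5, so
 * invert == 1 and jcc_mask[5 >> 1] == X86_EFLAGS_ZF. The emulated
 * branch is therefore taken exactly when ZF is clear, matching the
 * hardware semantics of the original instruction.
 */
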
static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
	bool match;

	if (p->ainsn.loop.type != 3) {	/* LOOP* */
		if (p->ainsn.loop.asize == 32)
			match = ((*(u32 *)&regs->cx)--) != 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = ((*(u64 *)&regs->cx)--) != 0;
#endif
		else
			match = ((*(u16 *)&regs->cx)--) != 0;
	} else {			/* JCXZ */
		if (p->ainsn.loop.asize == 32)
			match = *(u32 *)(&regs->cx) == 0;
#ifdef CONFIG_X86_64
		else if (p->ainsn.loop.asize == 64)
			match = *(u64 *)(&regs->cx) == 0;
#endif
		else
			match = *(u16 *)(&regs->cx) == 0;
	}

	if (p->ainsn.loop.type == 0)	/* LOOPNE */
		match = match && !(regs->flags & X86_EFLAGS_ZF);
	else if (p->ainsn.loop.type == 1)	/* LOOPE */
		match = match && (regs->flags & X86_EFLAGS_ZF);

	__kprobe_emulate_jmp(p, regs, match);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);

static const int addrmode_regoffs[] = {
	offsetof(struct pt_regs, ax),
	offsetof(struct pt_regs, cx),
	offsetof(struct pt_regs, dx),
	offsetof(struct pt_regs, bx),
	offsetof(struct pt_regs, sp),
	offsetof(struct pt_regs, bp),
	offsetof(struct pt_regs, si),
	offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
	offsetof(struct pt_regs, r8),
	offsetof(struct pt_regs, r9),
	offsetof(struct pt_regs, r10),
	offsetof(struct pt_regs, r11),
	offsetof(struct pt_regs, r12),
	offsetof(struct pt_regs, r13),
	offsetof(struct pt_regs, r14),
	offsetof(struct pt_regs, r15),
#endif
};

static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_call(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);

static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];

	int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);

static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
	insn_byte_t opcode = insn->opcode.bytes[0];

	switch (opcode) {
	case 0xfa:	/* cli */
	case 0xfb:	/* sti */
	case 0x9c:	/* pushfl */
	case 0x9d:	/* popf/popfd */
		/*
		 * IF modifiers must be emulated, since executing them while
		 * single-stepping over int3 would enable interrupts.
		 */
		p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
		p->ainsn.opcode = opcode;
		break;
	case 0xc2:	/* ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
		p->ainsn.emulate_op = kprobe_emulate_ret;
		break;
	case 0x9a:	/* far call absolute -- segment is not supported */
	case 0xea:	/* far jmp absolute -- segment is not supported */
	case 0xcc:	/* int3 */
	case 0xcf:	/* iret -- in-kernel IRET is not supported */
		return -EOPNOTSUPP;
	case 0xe8:	/* near call relative */
		p->ainsn.emulate_op = kprobe_emulate_call;
		if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0xeb:	/* short jump relative */
	case 0xe9:	/* near jump relative */
		p->ainsn.emulate_op = kprobe_emulate_jmp;
		if (insn->immediate.nbytes == 1)
			p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		else if (insn->immediate.nbytes == 2)
			p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
		else
			p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		break;
	case 0x70 ... 0x7f:
		/* 1-byte conditional jump */
		p->ainsn.emulate_op = kprobe_emulate_jcc;
		p->ainsn.jcc.type = opcode & 0xf;
		p->ainsn.rel32 = *(char *)insn->immediate.bytes;
		break;
	case 0x0f:
		opcode = insn->opcode.bytes[1];
		if ((opcode & 0xf0) == 0x80) {
			/* 2-byte conditional jump */
			p->ainsn.emulate_op = kprobe_emulate_jcc;
			p->ainsn.jcc.type = opcode & 0xf;
			if (insn->immediate.nbytes == 2)
				p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
			else
				p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
		} else if (opcode == 0x01 &&
			   X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
			   X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
			/* VM extensions - not supported */
			return -EOPNOTSUPP;
		}
		break;
	case 0xe0:	/* Loop NZ */
	case 0xe1:	/* Loop */
	case 0xe2:	/* Loop */
	case 0xe3:	/* J*CXZ */
		p->ainsn.emulate_op = kprobe_emulate_loop;
		p->ainsn.loop.type = opcode & 0x3;
		p->ainsn.loop.asize = insn->addr_bytes * 8;
		p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
		break;
	case 0xff:
		/*
		 * Since 0xff is an extended group opcode, the instruction
		 * is determined by the MOD/RM byte.
		 */
		opcode = insn->modrm.bytes[0];
		if ((opcode & 0x30) == 0x10) {
			if ((opcode & 0x8) == 0x8)
				return -EOPNOTSUPP;	/* far call */
			/* call absolute, indirect */
			p->ainsn.emulate_op = kprobe_emulate_call_indirect;
		} else if ((opcode & 0x30) == 0x20) {
			if ((opcode & 0x8) == 0x8)
				return -EOPNOTSUPP;	/* far jmp */
			/* jmp near absolute indirect */
			p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
		} else
			break;

		if (insn->addr_bytes != sizeof(unsigned long))
			return -EOPNOTSUPP;	/* Don't support different size */
		if (X86_MODRM_MOD(opcode) != 3)
			return -EOPNOTSUPP;	/* TODO: support memory addressing */

		p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
#ifdef CONFIG_X86_64
		if (X86_REX_B(insn->rex_prefix.value))
			p->ainsn.indirect.reg += 8;
#endif
		break;
	default:
		break;
	}
	p->ainsn.size = insn->length;

	return 0;
}
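
/*
 * Decoding example for the 0xff group above: "call *%r10" encodes as
 * 41 ff d2. ModRM == 0xd2 gives mod == 3, reg == 2 (near indirect call)
 * and rm == 2; REX.B adds 8, so indirect.reg == 10, which indexes r10
 * in addrmode_regoffs[].
 */
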
static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int ret, len;

	/* Copy the instruction, recovering it if another kprobe/optprobe modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/* Analyze the opcode and set up the emulate functions */
	ret = prepare_emulation(p, &insn);
	if (ret < 0)
		return ret;

	/* Add an int3 for single-stepping or a booster jmp */
	len = prepare_singlestep(buf, p, &insn);
	if (len < 0)
		return len;

	/* Also, a displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	p->ainsn.tp_len = len;
	perf_event_text_poke(p->ainsn.insn, NULL, 0, buf, len);

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;

	memset(&p->ainsn, 0, sizeof(p->ainsn));

	/* insn: must be on a special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}
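
/*
 * Arming sketch: if the probed instruction starts with 0x55 (push %rbp),
 * arming rewrites that first byte to 0xcc (int3) in the live kernel
 * text, while p->opcode keeps the 0x55 so that disarming can restore
 * the original byte later.
 */
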
void arch_arm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	text_poke(p->addr, &int3, 1);
	text_poke_sync();
	perf_event_text_poke(p->addr, &p->opcode, 1, &int3, 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	u8 int3 = INT3_INSN_OPCODE;

	perf_event_text_poke(p->addr, &int3, 1, &p->opcode, 1);
	text_poke(p->addr, &p->opcode, 1);
	text_poke_sync();
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		/* Record the perf event before freeing the slot */
		perf_event_text_poke(p->ainsn.insn, p->ainsn.insn,
				     p->ainsn.tp_len, NULL, 0);
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & X86_EFLAGS_IF);
}

static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		/* This will restore both kcb and current_kprobe */
		restore_previous_kprobe(kcb);
	} else {
		/*
		 * Always update the kcb status because
		 * reset_current_kprobe() doesn't update kcb.
		 */
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (cur->post_handler)
			cur->post_handler(cur, regs, 0);
		reset_current_kprobe();
	}
}
NOKPROBE_SYMBOL(kprobe_post_process);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPTION)
	if (p->ainsn.boostable) {
		/* Boost up -- we can execute the copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single-
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	if (p->ainsn.emulate_op) {
		p->ainsn.emulate_op(p, regs);
		kprobe_post_process(p, regs, kcb);
		return;
	}

	/* Disable interrupts and point the ip register at the trampoline */
	regs->flags &= ~X86_EFLAGS_IF;
	regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn. We do not use a hardware trap either, but
 * another "int3" placed right after the copied instruction.
 * Unlike trap-based single-stepping, "int3" single-stepping cannot
 * handle instructions that change the ip register, e.g. jmp, call and
 * conditional jmp, nor instructions that change the IF flag, because
 * interrupts must stay disabled around the single-step. Such
 * instructions are emulated in software; the others are single-stepped
 * using "int3".
 *
 * When the second "int3" is handled, regs->ip and regs->flags need to
 * be adjusted so that execution resumes at the correct code.
 */
static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;

	/* Restore the saved interrupt flag and ip register */
	regs->flags |= kcb->kprobe_saved_flags;
	/* Note that regs->ip points past the executed int3, so step back */
	regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
}
NOKPROBE_SYMBOL(resume_singlestep);
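
/*
 * Arithmetic sketch for the fixup above: after the second int3 fires,
 * regs->ip == copy_ip + insn_len + INT3_INSN_SIZE. Adding
 * (orig_ip - copy_ip) - INT3_INSN_SIZE therefore yields
 * orig_ip + insn_len, the instruction following the probed one in the
 * original kernel text.
 */
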
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/*
		 * A probe has been hit in the codepath leading up to, or just
		 * after, the single-stepping of a probed instruction. This
		 * entire codepath should strictly reside in the .kprobes.text
		 * section. Raise a BUG, or we'll continue in an endless
		 * reentering loop and eventually overflow the stack.
		 */
		pr_err("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
{
	return (kcb->kprobe_status == KPROBE_HIT_SS ||
		kcb->kprobe_status == KPROBE_REENTER);
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire duration of kprobe
	 * processing. Since int3 and the debug trap disable irqs and we
	 * clear IF while single-stepping, this must not be preemptible.
	 */

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, that means
			 * the user handler set up registers to exit to
			 * another instruction; we must skip the single-
			 * stepping.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
			return 1;
		}
	} else if (kprobe_is_ss(kcb)) {
		p = kprobe_running();
		if ((unsigned long)p->ainsn.insn < regs->ip &&
		    (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
			/* Most probably this is the second int3 of the single-step */
			resume_singlestep(p, regs, kcb);
			kprobe_post_process(p, regs, kcb);
			return 1;
		}
	}

	if (*addr != INT3_INSN_OPCODE) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address. In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	} /* else: not a kprobe fault; let the kernel handle it */

	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen during single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault. We reset the current
		 * kprobe and point the ip back at the probe address,
		 * then allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;

		/*
		 * If the IF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
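
/*
 * Usage sketch (not part of this file): the arch hooks above are driven
 * by the generic kprobes API. A minimal module would do something like
 * the following; the symbol name is a hypothetical example target:
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_sys_openat2",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	register_kprobe(&kp);	// reaches arch_prepare_kprobe()/arch_arm_kprobe()
 *	...
 *	unregister_kprobe(&kp);
 *
 * See samples/kprobes/kprobe_example.c for a complete, runnable example.
 */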