// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/paravirt.h>
#include <asm/asm-prototypes.h>
#include <asm/cfi.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

#define DA_ALL		(~0)
#define DA_ALT		0x01
#define DA_RET		0x02
#define DA_RETPOLINE	0x04
#define DA_ENDBR	0x08
#define DA_SMP		0x10

static unsigned int debug_alternative;

static int __init debug_alt(char *str)
{
	if (str && *str == '=')
		str++;

	if (!str || kstrtouint(str, 0, &debug_alternative))
		debug_alternative = DA_ALL;

	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(type, fmt, args...)					\
do {									\
	if (debug_alternative & DA_##type)				\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(type, buf, len, fmt, args...)			\
do {									\
	if (unlikely(debug_alternative & DA_##type)) {			\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

static const unsigned char x86nops[] =
{
	BYTES_NOP1,
	BYTES_NOP2,
	BYTES_NOP3,
	BYTES_NOP4,
	BYTES_NOP5,
	BYTES_NOP6,
	BYTES_NOP7,
	BYTES_NOP8,
#ifdef CONFIG_64BIT
	BYTES_NOP9,
	BYTES_NOP10,
	BYTES_NOP11,
#endif
};

const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
	NULL,
	x86nops,
	x86nops + 1,
	x86nops + 1 + 2,
	x86nops + 1 + 2 + 3,
	x86nops + 1 + 2 + 3 + 4,
	x86nops + 1 + 2 + 3 + 4 + 5,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
#ifdef CONFIG_64BIT
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
#endif
};

/*
 * Nomenclature for variable names to simplify and clarify this code and ease
 * any potential staring at it:
 *
 * @instr: source address of the original instructions in the kernel text as
 * generated by the compiler.
 *
 * @buf: temporary buffer on which the patching operates. This buffer is
 * eventually text-poked into the kernel image.
 *
 * @replacement/@repl: pointer to the opcodes which are replacing @instr, located
 * in the .altinstr_replacement section.
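 *
 * Purely illustrative example (the names are made up, not from this file):
 * a site built with something like
 *
 *	ALTERNATIVE("call old_impl", "call new_impl", X86_FEATURE_FOO)
 *
 * has @instr pointing at the "call old_impl" bytes in .text, @replacement
 * pointing at the "call new_impl" bytes in .altinstr_replacement, and @buf
 * is the scratch copy that gets fixed up and then text-poked over @instr.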
 */

/*
 * Fill the buffer with a single effective instruction of size @len.
 *
 * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
 * for every single-byte NOP, try to generate the maximally available NOP of
 * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs one
 * for each single-byte NOP). If @len to fill out is > ASM_NOP_MAX, pad with
 * INT3 and *jump* over it instead of executing long and daft NOPs.
 */
static void add_nop(u8 *buf, unsigned int len)
{
	u8 *target = buf + len;

	if (!len)
		return;

	if (len <= ASM_NOP_MAX) {
		memcpy(buf, x86_nops[len], len);
		return;
	}

	if (len < 128) {
		__text_gen_insn(buf, JMP8_INSN_OPCODE, buf, target, JMP8_INSN_SIZE);
		buf += JMP8_INSN_SIZE;
	} else {
		__text_gen_insn(buf, JMP32_INSN_OPCODE, buf, target, JMP32_INSN_SIZE);
		buf += JMP32_INSN_SIZE;
	}

	for (; buf < target; buf++)
		*buf = INT3_INSN_OPCODE;
}

extern s32 __retpoline_sites[], __retpoline_sites_end[];
extern s32 __return_sites[], __return_sites_end[];
extern s32 __cfi_sites[], __cfi_sites_end[];
extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Matches NOP and NOPL, not any of the other possible NOPs.
 */
static bool insn_is_nop(struct insn *insn)
{
	/* Anything NOP, but no REP NOP */
	if (insn->opcode.bytes[0] == 0x90 &&
	    (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
		return true;

	/* NOPL */
	if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
		return true;

	/* TODO: more nops */

	return false;
}

/*
 * Find the offset of the first non-NOP instruction starting at @offset
 * but no further than @len.
 */
static int skip_nops(u8 *buf, int offset, int len)
{
	struct insn insn;

	for (; offset < len; offset += insn.length) {
		if (insn_decode_kernel(&insn, &buf[offset]))
			break;

		if (!insn_is_nop(&insn))
			break;
	}

	return offset;
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
{
	for (int next, i = 0; i < len; i = next) {
		struct insn insn;

		if (insn_decode_kernel(&insn, &buf[i]))
			return;

		next = i + insn.length;

		if (insn_is_nop(&insn)) {
			int nop = i;

			/* Has the NOP already been optimized? */
			if (i + insn.length == len)
				return;

			next = skip_nops(buf, next, len);

			add_nop(buf + nop, next - nop);
			DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, next);
		}
	}
}

/*
 * In this context, "source" is where the instructions are placed in the
 * section .altinstr_replacement, for example during kernel build by the
 * toolchain.
 * "Destination" is where the instructions are being patched in by this
 * machinery.
254 * 255 * The source offset is: 256 * 257 * src_imm = target - src_next_ip (1) 258 * 259 * and the target offset is: 260 * 261 * dst_imm = target - dst_next_ip (2) 262 * 263 * so rework (1) as an expression for target like: 264 * 265 * target = src_imm + src_next_ip (1a) 266 * 267 * and substitute in (2) to get: 268 * 269 * dst_imm = (src_imm + src_next_ip) - dst_next_ip (3) 270 * 271 * Now, since the instruction stream is 'identical' at src and dst (it 272 * is being copied after all) it can be stated that: 273 * 274 * src_next_ip = src + ip_offset 275 * dst_next_ip = dst + ip_offset (4) 276 * 277 * Substitute (4) in (3) and observe ip_offset being cancelled out to 278 * obtain: 279 * 280 * dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset) 281 * = src_imm + src - dst + ip_offset - ip_offset 282 * = src_imm + src - dst (5) 283 * 284 * IOW, only the relative displacement of the code block matters. 285 */ 286 287 #define apply_reloc_n(n_, p_, d_) \ 288 do { \ 289 s32 v = *(s##n_ *)(p_); \ 290 v += (d_); \ 291 BUG_ON((v >> 31) != (v >> (n_-1))); \ 292 *(s##n_ *)(p_) = (s##n_)v; \ 293 } while (0) 294 295 296 static __always_inline 297 void apply_reloc(int n, void *ptr, uintptr_t diff) 298 { 299 switch (n) { 300 case 1: apply_reloc_n(8, ptr, diff); break; 301 case 2: apply_reloc_n(16, ptr, diff); break; 302 case 4: apply_reloc_n(32, ptr, diff); break; 303 default: BUG(); 304 } 305 } 306 307 static __always_inline 308 bool need_reloc(unsigned long offset, u8 *src, size_t src_len) 309 { 310 u8 *target = src + offset; 311 /* 312 * If the target is inside the patched block, it's relative to the 313 * block itself and does not need relocation. 314 */ 315 return (target < src || target > src + src_len); 316 } 317 318 static void __apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len) 319 { 320 for (int next, i = 0; i < instrlen; i = next) { 321 struct insn insn; 322 323 if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i]))) 324 return; 325 326 next = i + insn.length; 327 328 switch (insn.opcode.bytes[0]) { 329 case 0x0f: 330 if (insn.opcode.bytes[1] < 0x80 || 331 insn.opcode.bytes[1] > 0x8f) 332 break; 333 334 fallthrough; /* Jcc.d32 */ 335 case 0x70 ... 0x7f: /* Jcc.d8 */ 336 case JMP8_INSN_OPCODE: 337 case JMP32_INSN_OPCODE: 338 case CALL_INSN_OPCODE: 339 if (need_reloc(next + insn.immediate.value, repl, repl_len)) { 340 apply_reloc(insn.immediate.nbytes, 341 buf + i + insn_offset_immediate(&insn), 342 repl - instr); 343 } 344 345 /* 346 * Where possible, convert JMP.d32 into JMP.d8. 347 */ 348 if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) { 349 s32 imm = insn.immediate.value; 350 imm += repl - instr; 351 imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE; 352 if ((imm >> 31) == (imm >> 7)) { 353 buf[i+0] = JMP8_INSN_OPCODE; 354 buf[i+1] = (s8)imm; 355 356 memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2); 357 } 358 } 359 break; 360 } 361 362 if (insn_rip_relative(&insn)) { 363 if (need_reloc(next + insn.displacement.value, repl, repl_len)) { 364 apply_reloc(insn.displacement.nbytes, 365 buf + i + insn_offset_displacement(&insn), 366 repl - instr); 367 } 368 } 369 } 370 } 371 372 void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len) 373 { 374 __apply_relocation(buf, instr, instrlen, repl, repl_len); 375 optimize_nops(instr, buf, instrlen); 376 } 377 378 /* Low-level backend functions usable from alternative code replacements. 
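 *
 * For example (as alt_replace_call() below works out), an ALT_FLAG_DIRECT_CALL
 * site whose pv_ops slot is still NULL ends up calling BUG_func(), while a
 * site that resolves to nop_func() is patched out entirely.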
*/ 379 DEFINE_ASM_FUNC(nop_func, "", .entry.text); 380 EXPORT_SYMBOL_GPL(nop_func); 381 382 noinstr void BUG_func(void) 383 { 384 BUG(); 385 } 386 EXPORT_SYMBOL(BUG_func); 387 388 #define CALL_RIP_REL_OPCODE 0xff 389 #define CALL_RIP_REL_MODRM 0x15 390 391 /* 392 * Rewrite the "call BUG_func" replacement to point to the target of the 393 * indirect pv_ops call "call *disp(%ip)". 394 */ 395 static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a) 396 { 397 void *target, *bug = &BUG_func; 398 s32 disp; 399 400 if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) { 401 pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n"); 402 BUG(); 403 } 404 405 if (a->instrlen != 6 || 406 instr[0] != CALL_RIP_REL_OPCODE || 407 instr[1] != CALL_RIP_REL_MODRM) { 408 pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n"); 409 BUG(); 410 } 411 412 /* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */ 413 disp = *(s32 *)(instr + 2); 414 #ifdef CONFIG_X86_64 415 /* ff 15 00 00 00 00 call *0x0(%rip) */ 416 /* target address is stored at "next instruction + disp". */ 417 target = *(void **)(instr + a->instrlen + disp); 418 #else 419 /* ff 15 00 00 00 00 call *0x0 */ 420 /* target address is stored at disp. */ 421 target = *(void **)disp; 422 #endif 423 if (!target) 424 target = bug; 425 426 /* (BUG_func - .) + (target - BUG_func) := target - . */ 427 *(s32 *)(insn_buff + 1) += target - bug; 428 429 if (target == &nop_func) 430 return 0; 431 432 return 5; 433 } 434 435 /* 436 * Replace instructions with better alternatives for this CPU type. This runs 437 * before SMP is initialized to avoid SMP problems with self modifying code. 438 * This implies that asymmetric systems where APs have less capabilities than 439 * the boot processor are not handled. Tough. Make sure you disable such 440 * features by hand. 441 * 442 * Marked "noinline" to cause control flow change and thus insn cache 443 * to refetch changed I$ lines. 444 */ 445 void __init_or_module noinline apply_alternatives(struct alt_instr *start, 446 struct alt_instr *end) 447 { 448 u8 insn_buff[MAX_PATCH_LEN]; 449 u8 *instr, *replacement; 450 struct alt_instr *a; 451 452 DPRINTK(ALT, "alt table %px, -> %px", start, end); 453 454 /* 455 * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using 456 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here. 457 * During the process, KASAN becomes confused seeing partial LA57 458 * conversion and triggers a false-positive out-of-bound report. 459 * 460 * Disable KASAN until the patching is complete. 461 */ 462 kasan_disable_current(); 463 464 /* 465 * The scan order should be from start to end. A later scanned 466 * alternative code can overwrite previously scanned alternative code. 467 * Some kernel functions (e.g. memcpy, memset, etc) use this order to 468 * patch code. 469 * 470 * So be careful if you want to change the scan order to any other 471 * order. 472 */ 473 for (a = start; a < end; a++) { 474 int insn_buff_sz = 0; 475 476 instr = (u8 *)&a->instr_offset + a->instr_offset; 477 replacement = (u8 *)&a->repl_offset + a->repl_offset; 478 BUG_ON(a->instrlen > sizeof(insn_buff)); 479 BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); 480 481 /* 482 * Patch if either: 483 * - feature is present 484 * - feature not present but ALT_FLAG_NOT is set to mean, 485 * patch if feature is *NOT* present. 
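		 *
		 * Spelled out, the check below selects:
		 *   feature present, ALT_FLAG_NOT clear -> patch
		 *   feature absent,  ALT_FLAG_NOT clear -> keep original code
		 *   feature absent,  ALT_FLAG_NOT set   -> patch
		 *   feature present, ALT_FLAG_NOT set   -> keep original code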
486 */ 487 if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) { 488 memcpy(insn_buff, instr, a->instrlen); 489 optimize_nops(instr, insn_buff, a->instrlen); 490 text_poke_early(instr, insn_buff, a->instrlen); 491 continue; 492 } 493 494 DPRINTK(ALT, "feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d) flags: 0x%x", 495 a->cpuid >> 5, 496 a->cpuid & 0x1f, 497 instr, instr, a->instrlen, 498 replacement, a->replacementlen, a->flags); 499 500 memcpy(insn_buff, replacement, a->replacementlen); 501 insn_buff_sz = a->replacementlen; 502 503 if (a->flags & ALT_FLAG_DIRECT_CALL) { 504 insn_buff_sz = alt_replace_call(instr, insn_buff, a); 505 if (insn_buff_sz < 0) 506 continue; 507 } 508 509 for (; insn_buff_sz < a->instrlen; insn_buff_sz++) 510 insn_buff[insn_buff_sz] = 0x90; 511 512 apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen); 513 514 DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr); 515 DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement); 516 DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr); 517 518 text_poke_early(instr, insn_buff, insn_buff_sz); 519 } 520 521 kasan_enable_current(); 522 } 523 524 static inline bool is_jcc32(struct insn *insn) 525 { 526 /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */ 527 return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80; 528 } 529 530 #if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_OBJTOOL) 531 532 /* 533 * CALL/JMP *%\reg 534 */ 535 static int emit_indirect(int op, int reg, u8 *bytes) 536 { 537 int i = 0; 538 u8 modrm; 539 540 switch (op) { 541 case CALL_INSN_OPCODE: 542 modrm = 0x10; /* Reg = 2; CALL r/m */ 543 break; 544 545 case JMP32_INSN_OPCODE: 546 modrm = 0x20; /* Reg = 4; JMP r/m */ 547 break; 548 549 default: 550 WARN_ON_ONCE(1); 551 return -1; 552 } 553 554 if (reg >= 8) { 555 bytes[i++] = 0x41; /* REX.B prefix */ 556 reg -= 8; 557 } 558 559 modrm |= 0xc0; /* Mod = 3 */ 560 modrm += reg; 561 562 bytes[i++] = 0xff; /* opcode */ 563 bytes[i++] = modrm; 564 565 return i; 566 } 567 568 static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes) 569 { 570 u8 op = insn->opcode.bytes[0]; 571 int i = 0; 572 573 /* 574 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional 575 * tail-calls. Deal with them. 576 */ 577 if (is_jcc32(insn)) { 578 bytes[i++] = op; 579 op = insn->opcode.bytes[1]; 580 goto clang_jcc; 581 } 582 583 if (insn->length == 6) 584 bytes[i++] = 0x2e; /* CS-prefix */ 585 586 switch (op) { 587 case CALL_INSN_OPCODE: 588 __text_gen_insn(bytes+i, op, addr+i, 589 __x86_indirect_call_thunk_array[reg], 590 CALL_INSN_SIZE); 591 i += CALL_INSN_SIZE; 592 break; 593 594 case JMP32_INSN_OPCODE: 595 clang_jcc: 596 __text_gen_insn(bytes+i, op, addr+i, 597 __x86_indirect_jump_thunk_array[reg], 598 JMP32_INSN_SIZE); 599 i += JMP32_INSN_SIZE; 600 break; 601 602 default: 603 WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr); 604 return -1; 605 } 606 607 WARN_ON_ONCE(i != insn->length); 608 609 return i; 610 } 611 612 /* 613 * Rewrite the compiler generated retpoline thunk calls. 614 * 615 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate 616 * indirect instructions, avoiding the extra indirection. 617 * 618 * For example, convert: 619 * 620 * CALL __x86_indirect_thunk_\reg 621 * 622 * into: 623 * 624 * CALL *%\reg 625 * 626 * It also tries to inline spectre_v2=retpoline,lfence when size permits. 
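 *
 * Byte-level sketch (illustrative, assuming the thunk register is %rax):
 *
 *	e8 xx xx xx xx		call __x86_indirect_thunk_rax
 *
 * becomes
 *
 *	ff d0			call *%rax
 *	90 90 90		NOP padding up to the original length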
627 */ 628 static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes) 629 { 630 retpoline_thunk_t *target; 631 int reg, ret, i = 0; 632 u8 op, cc; 633 634 target = addr + insn->length + insn->immediate.value; 635 reg = target - __x86_indirect_thunk_array; 636 637 if (WARN_ON_ONCE(reg & ~0xf)) 638 return -1; 639 640 /* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */ 641 BUG_ON(reg == 4); 642 643 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) && 644 !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { 645 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) 646 return emit_call_track_retpoline(addr, insn, reg, bytes); 647 648 return -1; 649 } 650 651 op = insn->opcode.bytes[0]; 652 653 /* 654 * Convert: 655 * 656 * Jcc.d32 __x86_indirect_thunk_\reg 657 * 658 * into: 659 * 660 * Jncc.d8 1f 661 * [ LFENCE ] 662 * JMP *%\reg 663 * [ NOP ] 664 * 1: 665 */ 666 if (is_jcc32(insn)) { 667 cc = insn->opcode.bytes[1] & 0xf; 668 cc ^= 1; /* invert condition */ 669 670 bytes[i++] = 0x70 + cc; /* Jcc.d8 */ 671 bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */ 672 673 /* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */ 674 op = JMP32_INSN_OPCODE; 675 } 676 677 /* 678 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE. 679 */ 680 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { 681 bytes[i++] = 0x0f; 682 bytes[i++] = 0xae; 683 bytes[i++] = 0xe8; /* LFENCE */ 684 } 685 686 ret = emit_indirect(op, reg, bytes + i); 687 if (ret < 0) 688 return ret; 689 i += ret; 690 691 /* 692 * The compiler is supposed to EMIT an INT3 after every unconditional 693 * JMP instruction due to AMD BTC. However, if the compiler is too old 694 * or MITIGATION_SLS isn't enabled, we still need an INT3 after 695 * indirect JMPs even on Intel. 696 */ 697 if (op == JMP32_INSN_OPCODE && i < insn->length) 698 bytes[i++] = INT3_INSN_OPCODE; 699 700 for (; i < insn->length;) 701 bytes[i++] = BYTES_NOP1; 702 703 return i; 704 } 705 706 /* 707 * Generated by 'objtool --retpoline'. 708 */ 709 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) 710 { 711 s32 *s; 712 713 for (s = start; s < end; s++) { 714 void *addr = (void *)s + *s; 715 struct insn insn; 716 int len, ret; 717 u8 bytes[16]; 718 u8 op1, op2; 719 720 ret = insn_decode_kernel(&insn, addr); 721 if (WARN_ON_ONCE(ret < 0)) 722 continue; 723 724 op1 = insn.opcode.bytes[0]; 725 op2 = insn.opcode.bytes[1]; 726 727 switch (op1) { 728 case CALL_INSN_OPCODE: 729 case JMP32_INSN_OPCODE: 730 break; 731 732 case 0x0f: /* escape */ 733 if (op2 >= 0x80 && op2 <= 0x8f) 734 break; 735 fallthrough; 736 default: 737 WARN_ON_ONCE(1); 738 continue; 739 } 740 741 DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS", 742 addr, addr, insn.length, 743 addr + insn.length + insn.immediate.value); 744 745 len = patch_retpoline(addr, &insn, bytes); 746 if (len == insn.length) { 747 optimize_nops(addr, bytes, len); 748 DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr); 749 DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr); 750 text_poke_early(addr, bytes, len); 751 } 752 } 753 } 754 755 #ifdef CONFIG_MITIGATION_RETHUNK 756 757 /* 758 * Rewrite the compiler generated return thunk tail-calls. 759 * 760 * For example, convert: 761 * 762 * JMP __x86_return_thunk 763 * 764 * into: 765 * 766 * RET 767 */ 768 static int patch_return(void *addr, struct insn *insn, u8 *bytes) 769 { 770 int i = 0; 771 772 /* Patch the custom return thunks... 
*/ 773 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { 774 i = JMP32_INSN_SIZE; 775 __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); 776 } else { 777 /* ... or patch them out if not needed. */ 778 bytes[i++] = RET_INSN_OPCODE; 779 } 780 781 for (; i < insn->length;) 782 bytes[i++] = INT3_INSN_OPCODE; 783 return i; 784 } 785 786 void __init_or_module noinline apply_returns(s32 *start, s32 *end) 787 { 788 s32 *s; 789 790 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) 791 static_call_force_reinit(); 792 793 for (s = start; s < end; s++) { 794 void *dest = NULL, *addr = (void *)s + *s; 795 struct insn insn; 796 int len, ret; 797 u8 bytes[16]; 798 u8 op; 799 800 ret = insn_decode_kernel(&insn, addr); 801 if (WARN_ON_ONCE(ret < 0)) 802 continue; 803 804 op = insn.opcode.bytes[0]; 805 if (op == JMP32_INSN_OPCODE) 806 dest = addr + insn.length + insn.immediate.value; 807 808 if (__static_call_fixup(addr, op, dest) || 809 WARN_ONCE(dest != &__x86_return_thunk, 810 "missing return thunk: %pS-%pS: %*ph", 811 addr, dest, 5, addr)) 812 continue; 813 814 DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS", 815 addr, addr, insn.length, 816 addr + insn.length + insn.immediate.value); 817 818 len = patch_return(addr, &insn, bytes); 819 if (len == insn.length) { 820 DUMP_BYTES(RET, ((u8*)addr), len, "%px: orig: ", addr); 821 DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr); 822 text_poke_early(addr, bytes, len); 823 } 824 } 825 } 826 #else 827 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } 828 #endif /* CONFIG_MITIGATION_RETHUNK */ 829 830 #else /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */ 831 832 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { } 833 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } 834 835 #endif /* CONFIG_MITIGATION_RETPOLINE && CONFIG_OBJTOOL */ 836 837 #ifdef CONFIG_X86_KERNEL_IBT 838 839 static void poison_cfi(void *addr); 840 841 static void __init_or_module poison_endbr(void *addr, bool warn) 842 { 843 u32 endbr, poison = gen_endbr_poison(); 844 845 if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr))) 846 return; 847 848 if (!is_endbr(endbr)) { 849 WARN_ON_ONCE(warn); 850 return; 851 } 852 853 DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr); 854 855 /* 856 * When we have IBT, the lack of ENDBR will trigger #CP 857 */ 858 DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr); 859 DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr); 860 text_poke_early(addr, &poison, 4); 861 } 862 863 /* 864 * Generated by: objtool --ibt 865 * 866 * Seal the functions for indirect calls by clobbering the ENDBR instructions 867 * and the kCFI hash value. 
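 *
 * Concretely (illustrative): the 4-byte endbr64 (f3 0f 1e fa) at a sealed
 * function's entry is overwritten with a same-length non-ENDBR poison, so a
 * later indirect CALL/JMP to that address raises #CP under IBT instead of
 * silently succeeding.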
868 */ 869 void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end) 870 { 871 s32 *s; 872 873 for (s = start; s < end; s++) { 874 void *addr = (void *)s + *s; 875 876 poison_endbr(addr, true); 877 if (IS_ENABLED(CONFIG_FINEIBT)) 878 poison_cfi(addr - 16); 879 } 880 } 881 882 #else 883 884 void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { } 885 886 #endif /* CONFIG_X86_KERNEL_IBT */ 887 888 #ifdef CONFIG_FINEIBT 889 #define __CFI_DEFAULT CFI_DEFAULT 890 #elif defined(CONFIG_CFI_CLANG) 891 #define __CFI_DEFAULT CFI_KCFI 892 #else 893 #define __CFI_DEFAULT CFI_OFF 894 #endif 895 896 enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT; 897 898 #ifdef CONFIG_CFI_CLANG 899 struct bpf_insn; 900 901 /* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */ 902 extern unsigned int __bpf_prog_runX(const void *ctx, 903 const struct bpf_insn *insn); 904 905 /* 906 * Force a reference to the external symbol so the compiler generates 907 * __kcfi_typid. 908 */ 909 __ADDRESSABLE(__bpf_prog_runX); 910 911 /* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */ 912 asm ( 913 " .pushsection .data..ro_after_init,\"aw\",@progbits \n" 914 " .type cfi_bpf_hash,@object \n" 915 " .globl cfi_bpf_hash \n" 916 " .p2align 2, 0x0 \n" 917 "cfi_bpf_hash: \n" 918 " .long __kcfi_typeid___bpf_prog_runX \n" 919 " .size cfi_bpf_hash, 4 \n" 920 " .popsection \n" 921 ); 922 923 /* Must match bpf_callback_t */ 924 extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64); 925 926 __ADDRESSABLE(__bpf_callback_fn); 927 928 /* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */ 929 asm ( 930 " .pushsection .data..ro_after_init,\"aw\",@progbits \n" 931 " .type cfi_bpf_subprog_hash,@object \n" 932 " .globl cfi_bpf_subprog_hash \n" 933 " .p2align 2, 0x0 \n" 934 "cfi_bpf_subprog_hash: \n" 935 " .long __kcfi_typeid___bpf_callback_fn \n" 936 " .size cfi_bpf_subprog_hash, 4 \n" 937 " .popsection \n" 938 ); 939 940 u32 cfi_get_func_hash(void *func) 941 { 942 u32 hash; 943 944 func -= cfi_get_offset(); 945 switch (cfi_mode) { 946 case CFI_FINEIBT: 947 func += 7; 948 break; 949 case CFI_KCFI: 950 func += 1; 951 break; 952 default: 953 return 0; 954 } 955 956 if (get_kernel_nofault(hash, func)) 957 return 0; 958 959 return hash; 960 } 961 #endif 962 963 #ifdef CONFIG_FINEIBT 964 965 static bool cfi_rand __ro_after_init = true; 966 static u32 cfi_seed __ro_after_init; 967 968 /* 969 * Re-hash the CFI hash with a boot-time seed while making sure the result is 970 * not a valid ENDBR instruction. 
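 *
 * Both the hash and its negation are checked below because kCFI callers load
 * the *negated* hash into %r10d (see decode_caller_hash()), so neither form
 * may alias an ENDBR opcode.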
 */
static u32 cfi_rehash(u32 hash)
{
	hash ^= cfi_seed;
	while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
		bool lsb = hash & 1;
		hash >>= 1;
		if (lsb)
			hash ^= 0x80200003;
	}
	return hash;
}

static __init int cfi_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "auto")) {
			cfi_mode = CFI_DEFAULT;
		} else if (!strcmp(str, "off")) {
			cfi_mode = CFI_OFF;
			cfi_rand = false;
		} else if (!strcmp(str, "kcfi")) {
			cfi_mode = CFI_KCFI;
		} else if (!strcmp(str, "fineibt")) {
			cfi_mode = CFI_FINEIBT;
		} else if (!strcmp(str, "norand")) {
			cfi_rand = false;
		} else {
			pr_err("Ignoring unknown cfi option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("cfi", cfi_parse_cmdline);

/*
 * kCFI						FineIBT
 *
 * __cfi_\func:					__cfi_\func:
 *	movl   $0x12345678,%eax	// 5		endbr64			// 4
 *	nop					subl   $0x12345678,%r10d // 7
 *	nop					jz     1f		// 2
 *	nop					ud2			// 2
 *	nop				1:	nop			// 1
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *
 *
 * caller:					caller:
 *	movl	$(-0x12345678),%r10d	// 6	movl   $0x12345678,%r10d // 6
 *	addl	$-15(%r11),%r10d	// 4	sub    $16,%r11		// 4
 *	je	1f			// 2	nop4			// 4
 *	ud2				// 2
 * 1:	call	__x86_indirect_thunk_r11 // 5	call   *%r11; nop2;	// 5
 *
 */

asm(	".pushsection .rodata			\n"
	"fineibt_preamble_start:		\n"
	"	endbr64				\n"
	"	subl	$0x12345678, %r10d	\n"
	"	je	fineibt_preamble_end	\n"
	"	ud2				\n"
	"	nop				\n"
	"fineibt_preamble_end:			\n"
	".popsection\n"
);

extern u8 fineibt_preamble_start[];
extern u8 fineibt_preamble_end[];

#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
#define fineibt_preamble_hash 7

asm(	".pushsection .rodata			\n"
	"fineibt_caller_start:			\n"
	"	movl	$0x12345678, %r10d	\n"
	"	sub	$16, %r11		\n"
	ASM_NOP4
	"fineibt_caller_end:			\n"
	".popsection				\n"
);

extern u8 fineibt_caller_start[];
extern u8 fineibt_caller_end[];

#define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
#define fineibt_caller_hash 2

#define fineibt_caller_jmp (fineibt_caller_size - 2)

static u32 decode_preamble_hash(void *addr)
{
	u8 *p = addr;

	/* b8 78 56 34 12		mov $0x12345678,%eax */
	if (p[0] == 0xb8)
		return *(u32 *)(addr + 1);

	return 0; /* invalid hash value */
}

static u32 decode_caller_hash(void *addr)
{
	u8 *p = addr;

	/* 41 ba 78 56 34 12		mov $0x12345678,%r10d */
	if (p[0] == 0x41 && p[1] == 0xba)
		return -*(u32 *)(addr + 2);

	/* eb 0c 78 56 34 12		jmp.d8 +12 */
	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
		return -*(u32 *)(addr + 2);

	return 0; /* invalid hash value */
}

/* .retpoline_sites */
static int cfi_disable_callers(s32 *start, s32 *end)
{
	/*
	 * Disable kCFI by patching in a JMP.d8, this leaves the hash immediate
	 * intact for later usage. Also see decode_caller_hash() and
	 * cfi_rewrite_callers().
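	 *
	 * In bytes (illustrative):
	 *
	 *	41 ba 78 56 34 12	movl	$0x12345678, %r10d
	 *
	 * becomes
	 *
	 *	eb 0c 78 56 34 12	jmp.d8	+12
	 *
	 * i.e. only the first two bytes change, and execution skips the whole
	 * kCFI check sequence and falls straight through to the indirect call.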
	 */
	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (!hash) /* nocfi callers */
			continue;

		text_poke_early(addr, jmp, 2);
	}

	return 0;
}

static int cfi_enable_callers(s32 *start, s32 *end)
{
	/*
	 * Re-enable kCFI, undo what cfi_disable_callers() did.
	 */
	const u8 mov[] = { 0x41, 0xba };
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (!hash) /* nocfi callers */
			continue;

		text_poke_early(addr, mov, 2);
	}

	return 0;
}

/* .cfi_sites */
static int cfi_rand_preamble(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		hash = decode_preamble_hash(addr);
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
			 addr, addr, 5, addr))
			return -EINVAL;

		hash = cfi_rehash(hash);
		text_poke_early(addr + 1, &hash, 4);
	}

	return 0;
}

static int cfi_rewrite_preamble(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		hash = decode_preamble_hash(addr);
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
			 addr, addr, 5, addr))
			return -EINVAL;

		text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
		WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
		text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
	}

	return 0;
}

static void cfi_rewrite_endbr(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;

		poison_endbr(addr+16, false);
	}
}

/* .retpoline_sites */
static int cfi_rand_callers(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (hash) {
			hash = -cfi_rehash(hash);
			text_poke_early(addr + 2, &hash, 4);
		}
	}

	return 0;
}

static int cfi_rewrite_callers(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (hash) {
			text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
			WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
			text_poke_early(addr + fineibt_caller_hash, &hash, 4);
		}
		/* rely on apply_retpolines() */
	}

	return 0;
}

static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
			    s32 *start_cfi, s32 *end_cfi, bool builtin)
{
	int ret;

	if (WARN_ONCE(fineibt_preamble_size != 16,
		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
		return;

	if (cfi_mode == CFI_DEFAULT) {
		cfi_mode = CFI_KCFI;
		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
			cfi_mode = CFI_FINEIBT;
	}

	/*
	 * Rewrite the callers
to not use the __cfi_ stubs, such that we might 1265 * rewrite them. This disables all CFI. If this succeeds but any of the 1266 * later stages fails, we're without CFI. 1267 */ 1268 ret = cfi_disable_callers(start_retpoline, end_retpoline); 1269 if (ret) 1270 goto err; 1271 1272 if (cfi_rand) { 1273 if (builtin) { 1274 cfi_seed = get_random_u32(); 1275 cfi_bpf_hash = cfi_rehash(cfi_bpf_hash); 1276 cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash); 1277 } 1278 1279 ret = cfi_rand_preamble(start_cfi, end_cfi); 1280 if (ret) 1281 goto err; 1282 1283 ret = cfi_rand_callers(start_retpoline, end_retpoline); 1284 if (ret) 1285 goto err; 1286 } 1287 1288 switch (cfi_mode) { 1289 case CFI_OFF: 1290 if (builtin) 1291 pr_info("Disabling CFI\n"); 1292 return; 1293 1294 case CFI_KCFI: 1295 ret = cfi_enable_callers(start_retpoline, end_retpoline); 1296 if (ret) 1297 goto err; 1298 1299 if (builtin) 1300 pr_info("Using kCFI\n"); 1301 return; 1302 1303 case CFI_FINEIBT: 1304 /* place the FineIBT preamble at func()-16 */ 1305 ret = cfi_rewrite_preamble(start_cfi, end_cfi); 1306 if (ret) 1307 goto err; 1308 1309 /* rewrite the callers to target func()-16 */ 1310 ret = cfi_rewrite_callers(start_retpoline, end_retpoline); 1311 if (ret) 1312 goto err; 1313 1314 /* now that nobody targets func()+0, remove ENDBR there */ 1315 cfi_rewrite_endbr(start_cfi, end_cfi); 1316 1317 if (builtin) 1318 pr_info("Using FineIBT CFI\n"); 1319 return; 1320 1321 default: 1322 break; 1323 } 1324 1325 err: 1326 pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n"); 1327 } 1328 1329 static inline void poison_hash(void *addr) 1330 { 1331 *(u32 *)addr = 0; 1332 } 1333 1334 static void poison_cfi(void *addr) 1335 { 1336 switch (cfi_mode) { 1337 case CFI_FINEIBT: 1338 /* 1339 * __cfi_\func: 1340 * osp nopl (%rax) 1341 * subl $0, %r10d 1342 * jz 1f 1343 * ud2 1344 * 1: nop 1345 */ 1346 poison_endbr(addr, false); 1347 poison_hash(addr + fineibt_preamble_hash); 1348 break; 1349 1350 case CFI_KCFI: 1351 /* 1352 * __cfi_\func: 1353 * movl $0, %eax 1354 * .skip 11, 0x90 1355 */ 1356 poison_hash(addr + 1); 1357 break; 1358 1359 default: 1360 break; 1361 } 1362 } 1363 1364 #else 1365 1366 static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, 1367 s32 *start_cfi, s32 *end_cfi, bool builtin) 1368 { 1369 } 1370 1371 #ifdef CONFIG_X86_KERNEL_IBT 1372 static void poison_cfi(void *addr) { } 1373 #endif 1374 1375 #endif 1376 1377 void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, 1378 s32 *start_cfi, s32 *end_cfi) 1379 { 1380 return __apply_fineibt(start_retpoline, end_retpoline, 1381 start_cfi, end_cfi, 1382 /* .builtin = */ false); 1383 } 1384 1385 #ifdef CONFIG_SMP 1386 static void alternatives_smp_lock(const s32 *start, const s32 *end, 1387 u8 *text, u8 *text_end) 1388 { 1389 const s32 *poff; 1390 1391 for (poff = start; poff < end; poff++) { 1392 u8 *ptr = (u8 *)poff + *poff; 1393 1394 if (!*poff || ptr < text || ptr >= text_end) 1395 continue; 1396 /* turn DS segment override prefix into lock prefix */ 1397 if (*ptr == 0x3e) 1398 text_poke(ptr, ((unsigned char []){0xf0}), 1); 1399 } 1400 } 1401 1402 static void alternatives_smp_unlock(const s32 *start, const s32 *end, 1403 u8 *text, u8 *text_end) 1404 { 1405 const s32 *poff; 1406 1407 for (poff = start; poff < end; poff++) { 1408 u8 *ptr = (u8 *)poff + *poff; 1409 1410 if (!*poff || ptr < text || ptr >= text_end) 1411 continue; 1412 /* turn lock prefix into DS segment override prefix */ 1413 if (*ptr == 0xf0) 1414 text_poke(ptr, 
((unsigned char []){0x3E}), 1); 1415 } 1416 } 1417 1418 struct smp_alt_module { 1419 /* what is this ??? */ 1420 struct module *mod; 1421 char *name; 1422 1423 /* ptrs to lock prefixes */ 1424 const s32 *locks; 1425 const s32 *locks_end; 1426 1427 /* .text segment, needed to avoid patching init code ;) */ 1428 u8 *text; 1429 u8 *text_end; 1430 1431 struct list_head next; 1432 }; 1433 static LIST_HEAD(smp_alt_modules); 1434 static bool uniproc_patched = false; /* protected by text_mutex */ 1435 1436 void __init_or_module alternatives_smp_module_add(struct module *mod, 1437 char *name, 1438 void *locks, void *locks_end, 1439 void *text, void *text_end) 1440 { 1441 struct smp_alt_module *smp; 1442 1443 mutex_lock(&text_mutex); 1444 if (!uniproc_patched) 1445 goto unlock; 1446 1447 if (num_possible_cpus() == 1) 1448 /* Don't bother remembering, we'll never have to undo it. */ 1449 goto smp_unlock; 1450 1451 smp = kzalloc(sizeof(*smp), GFP_KERNEL); 1452 if (NULL == smp) 1453 /* we'll run the (safe but slow) SMP code then ... */ 1454 goto unlock; 1455 1456 smp->mod = mod; 1457 smp->name = name; 1458 smp->locks = locks; 1459 smp->locks_end = locks_end; 1460 smp->text = text; 1461 smp->text_end = text_end; 1462 DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n", 1463 smp->locks, smp->locks_end, 1464 smp->text, smp->text_end, smp->name); 1465 1466 list_add_tail(&smp->next, &smp_alt_modules); 1467 smp_unlock: 1468 alternatives_smp_unlock(locks, locks_end, text, text_end); 1469 unlock: 1470 mutex_unlock(&text_mutex); 1471 } 1472 1473 void __init_or_module alternatives_smp_module_del(struct module *mod) 1474 { 1475 struct smp_alt_module *item; 1476 1477 mutex_lock(&text_mutex); 1478 list_for_each_entry(item, &smp_alt_modules, next) { 1479 if (mod != item->mod) 1480 continue; 1481 list_del(&item->next); 1482 kfree(item); 1483 break; 1484 } 1485 mutex_unlock(&text_mutex); 1486 } 1487 1488 void alternatives_enable_smp(void) 1489 { 1490 struct smp_alt_module *mod; 1491 1492 /* Why bother if there are no other CPUs? */ 1493 BUG_ON(num_possible_cpus() == 1); 1494 1495 mutex_lock(&text_mutex); 1496 1497 if (uniproc_patched) { 1498 pr_info("switching to SMP code\n"); 1499 BUG_ON(num_online_cpus() != 1); 1500 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); 1501 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP); 1502 list_for_each_entry(mod, &smp_alt_modules, next) 1503 alternatives_smp_lock(mod->locks, mod->locks_end, 1504 mod->text, mod->text_end); 1505 uniproc_patched = false; 1506 } 1507 mutex_unlock(&text_mutex); 1508 } 1509 1510 /* 1511 * Return 1 if the address range is reserved for SMP-alternatives. 1512 * Must hold text_mutex. 1513 */ 1514 int alternatives_text_reserved(void *start, void *end) 1515 { 1516 struct smp_alt_module *mod; 1517 const s32 *poff; 1518 u8 *text_start = start; 1519 u8 *text_end = end; 1520 1521 lockdep_assert_held(&text_mutex); 1522 1523 list_for_each_entry(mod, &smp_alt_modules, next) { 1524 if (mod->text > text_end || mod->text_end < text_start) 1525 continue; 1526 for (poff = mod->locks; poff < mod->locks_end; poff++) { 1527 const u8 *ptr = (const u8 *)poff + *poff; 1528 1529 if (text_start <= ptr && text_end > ptr) 1530 return 1; 1531 } 1532 } 1533 1534 return 0; 1535 } 1536 #endif /* CONFIG_SMP */ 1537 1538 /* 1539 * Self-test for the INT3 based CALL emulation code. 1540 * 1541 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up 1542 * properly and that there is a stack gap between the INT3 frame and the 1543 * previous context. 
Without this gap doing a virtual PUSH on the interrupted 1544 * stack would corrupt the INT3 IRET frame. 1545 * 1546 * See entry_{32,64}.S for more details. 1547 */ 1548 1549 /* 1550 * We define the int3_magic() function in assembly to control the calling 1551 * convention such that we can 'call' it from assembly. 1552 */ 1553 1554 extern void int3_magic(unsigned int *ptr); /* defined in asm */ 1555 1556 asm ( 1557 " .pushsection .init.text, \"ax\", @progbits\n" 1558 " .type int3_magic, @function\n" 1559 "int3_magic:\n" 1560 ANNOTATE_NOENDBR 1561 " movl $1, (%" _ASM_ARG1 ")\n" 1562 ASM_RET 1563 " .size int3_magic, .-int3_magic\n" 1564 " .popsection\n" 1565 ); 1566 1567 extern void int3_selftest_ip(void); /* defined in asm below */ 1568 1569 static int __init 1570 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data) 1571 { 1572 unsigned long selftest = (unsigned long)&int3_selftest_ip; 1573 struct die_args *args = data; 1574 struct pt_regs *regs = args->regs; 1575 1576 OPTIMIZER_HIDE_VAR(selftest); 1577 1578 if (!regs || user_mode(regs)) 1579 return NOTIFY_DONE; 1580 1581 if (val != DIE_INT3) 1582 return NOTIFY_DONE; 1583 1584 if (regs->ip - INT3_INSN_SIZE != selftest) 1585 return NOTIFY_DONE; 1586 1587 int3_emulate_call(regs, (unsigned long)&int3_magic); 1588 return NOTIFY_STOP; 1589 } 1590 1591 /* Must be noinline to ensure uniqueness of int3_selftest_ip. */ 1592 static noinline void __init int3_selftest(void) 1593 { 1594 static __initdata struct notifier_block int3_exception_nb = { 1595 .notifier_call = int3_exception_notify, 1596 .priority = INT_MAX-1, /* last */ 1597 }; 1598 unsigned int val = 0; 1599 1600 BUG_ON(register_die_notifier(&int3_exception_nb)); 1601 1602 /* 1603 * Basically: int3_magic(&val); but really complicated :-) 1604 * 1605 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb 1606 * notifier above will emulate CALL for us. 1607 */ 1608 asm volatile ("int3_selftest_ip:\n\t" 1609 ANNOTATE_NOENDBR 1610 " int3; nop; nop; nop; nop\n\t" 1611 : ASM_CALL_CONSTRAINT 1612 : __ASM_SEL_RAW(a, D) (&val) 1613 : "memory"); 1614 1615 BUG_ON(val != 1); 1616 1617 unregister_die_notifier(&int3_exception_nb); 1618 } 1619 1620 static __initdata int __alt_reloc_selftest_addr; 1621 1622 extern void __init __alt_reloc_selftest(void *arg); 1623 __visible noinline void __init __alt_reloc_selftest(void *arg) 1624 { 1625 WARN_ON(arg != &__alt_reloc_selftest_addr); 1626 } 1627 1628 static noinline void __init alt_reloc_selftest(void) 1629 { 1630 /* 1631 * Tests apply_relocation(). 1632 * 1633 * This has a relative immediate (CALL) in a place other than the first 1634 * instruction and additionally on x86_64 we get a RIP-relative LEA: 1635 * 1636 * lea 0x0(%rip),%rdi # 5d0: R_X86_64_PC32 .init.data+0x5566c 1637 * call +0 # 5d5: R_X86_64_PLT32 __alt_reloc_selftest-0x4 1638 * 1639 * Getting this wrong will either crash and burn or tickle the WARN 1640 * above. 1641 */ 1642 asm_inline volatile ( 1643 ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS) 1644 : /* output */ 1645 : [mem] "m" (__alt_reloc_selftest_addr) 1646 : _ASM_ARG1 1647 ); 1648 } 1649 1650 void __init alternative_instructions(void) 1651 { 1652 int3_selftest(); 1653 1654 /* 1655 * The patching is not fully atomic, so try to avoid local 1656 * interruptions that might execute the to be patched code. 1657 * Other CPUs are not running. 1658 */ 1659 stop_nmi(); 1660 1661 /* 1662 * Don't stop machine check exceptions while patching. 
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during
	 * code patching.
	 */

	/*
	 * Make sure to set (artificial) features depending on used paravirt
	 * functions which can later influence alternative patching.
	 */
	paravirt_set_cap();

	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
			__cfi_sites, __cfi_sites_end, true);

	/*
	 * Rewrite the retpolines, must be done before alternatives since
	 * those can rewrite the retpoline thunks.
	 */
	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
	apply_returns(__return_sites, __return_sites_end);

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/*
	 * Now all calls are established. Apply the call thunks if
	 * required.
	 */
	callthunks_patch_builtin_calls();

	/*
	 * Seal all functions that do not have their address taken.
	 */
	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other CPUs are not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	restart_nmi();
	alternatives_patched = 1;

	alt_reloc_selftest();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Module text is initially marked non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		sync_core();
		local_irq_restore(flags);

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;
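/*
 * Typical pairing, as used by __text_poke() below (illustrative sketch):
 *
 *	local_irq_save(flags);
 *	prev = use_temporary_mm(poking_mm);
 *	... write through the poking_addr alias ...
 *	unuse_temporary_mm(prev);
 *	local_irq_restore(flags);
 */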
/*
 * Using a temporary mm makes it possible to set temporary mappings that are
 * not accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids TLB
 * shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
		leave_mm();

	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void text_poke_memcpy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

static void text_poke_memset(void *dst, const void *src, size_t len)
{
	int c = *(const int *)src;

	memset(dst, c, len);
}

typedef void text_poke_f(void *dst, const void *src, size_t len);
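/*
 * Rough picture of the mapping set up below (illustrative):
 *
 *	poking_addr              -> page containing addr
 *	poking_addr + PAGE_SIZE  -> the following page (only if the range
 *	                            crosses a page boundary)
 *
 * All writes go through this alias; the kernel mapping of addr itself is
 * never made writable.
 */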
static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While the boot memory allocator is running we cannot use struct pages
	 * as they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but it avoids open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	local_irq_save(flags);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	func((u8 *)poking_addr + offset_in_page(addr), src, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is not used, as it is at this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	if (func == text_poke_memcpy) {
		/*
		 * If the text does not match what we just wrote then something is
		 * fundamentally screwy; there's nothing we can really do about that.
		 */
		BUG_ON(memcmp(addr, src, len));
	}

	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking.
This can be achieved 1961 * by registering a module notifier, and ordering module removal and patching 1962 * through a mutex. 1963 */ 1964 void *text_poke(void *addr, const void *opcode, size_t len) 1965 { 1966 lockdep_assert_held(&text_mutex); 1967 1968 return __text_poke(text_poke_memcpy, addr, opcode, len); 1969 } 1970 1971 /** 1972 * text_poke_kgdb - Update instructions on a live kernel by kgdb 1973 * @addr: address to modify 1974 * @opcode: source of the copy 1975 * @len: length to copy 1976 * 1977 * Only atomic text poke/set should be allowed when not doing early patching. 1978 * It means the size must be writable atomically and the address must be aligned 1979 * in a way that permits an atomic write. It also makes sure we fit on a single 1980 * page. 1981 * 1982 * Context: should only be used by kgdb, which ensures no other core is running, 1983 * despite the fact it does not hold the text_mutex. 1984 */ 1985 void *text_poke_kgdb(void *addr, const void *opcode, size_t len) 1986 { 1987 return __text_poke(text_poke_memcpy, addr, opcode, len); 1988 } 1989 1990 void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, 1991 bool core_ok) 1992 { 1993 unsigned long start = (unsigned long)addr; 1994 size_t patched = 0; 1995 1996 if (WARN_ON_ONCE(!core_ok && core_kernel_text(start))) 1997 return NULL; 1998 1999 while (patched < len) { 2000 unsigned long ptr = start + patched; 2001 size_t s; 2002 2003 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched); 2004 2005 __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s); 2006 patched += s; 2007 } 2008 return addr; 2009 } 2010 2011 /** 2012 * text_poke_copy - Copy instructions into (an unused part of) RX memory 2013 * @addr: address to modify 2014 * @opcode: source of the copy 2015 * @len: length to copy, could be more than 2x PAGE_SIZE 2016 * 2017 * Not safe against concurrent execution; useful for JITs to dump 2018 * new code blocks into unused regions of RX memory. Can be used in 2019 * conjunction with synchronize_rcu_tasks() to wait for existing 2020 * execution to quiesce after having made sure no existing functions 2021 * pointers are live. 2022 */ 2023 void *text_poke_copy(void *addr, const void *opcode, size_t len) 2024 { 2025 mutex_lock(&text_mutex); 2026 addr = text_poke_copy_locked(addr, opcode, len, false); 2027 mutex_unlock(&text_mutex); 2028 return addr; 2029 } 2030 2031 /** 2032 * text_poke_set - memset into (an unused part of) RX memory 2033 * @addr: address to modify 2034 * @c: the byte to fill the area with 2035 * @len: length to copy, could be more than 2x PAGE_SIZE 2036 * 2037 * This is useful to overwrite unused regions of RX memory with illegal 2038 * instructions. 
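 * For example, filling an unused area with 0xcc (INT3) via text_poke_set()
 * makes any stray execution there trap immediately.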
2039 */ 2040 void *text_poke_set(void *addr, int c, size_t len) 2041 { 2042 unsigned long start = (unsigned long)addr; 2043 size_t patched = 0; 2044 2045 if (WARN_ON_ONCE(core_kernel_text(start))) 2046 return NULL; 2047 2048 mutex_lock(&text_mutex); 2049 while (patched < len) { 2050 unsigned long ptr = start + patched; 2051 size_t s; 2052 2053 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched); 2054 2055 __text_poke(text_poke_memset, (void *)ptr, (void *)&c, s); 2056 patched += s; 2057 } 2058 mutex_unlock(&text_mutex); 2059 return addr; 2060 } 2061 2062 static void do_sync_core(void *info) 2063 { 2064 sync_core(); 2065 } 2066 2067 void text_poke_sync(void) 2068 { 2069 on_each_cpu(do_sync_core, NULL, 1); 2070 } 2071 2072 /* 2073 * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of 2074 * this thing. When len == 6 everything is prefixed with 0x0f and we map 2075 * opcode to Jcc.d8, using len to distinguish. 2076 */ 2077 struct text_poke_loc { 2078 /* addr := _stext + rel_addr */ 2079 s32 rel_addr; 2080 s32 disp; 2081 u8 len; 2082 u8 opcode; 2083 const u8 text[POKE_MAX_OPCODE_SIZE]; 2084 /* see text_poke_bp_batch() */ 2085 u8 old; 2086 }; 2087 2088 struct bp_patching_desc { 2089 struct text_poke_loc *vec; 2090 int nr_entries; 2091 atomic_t refs; 2092 }; 2093 2094 static struct bp_patching_desc bp_desc; 2095 2096 static __always_inline 2097 struct bp_patching_desc *try_get_desc(void) 2098 { 2099 struct bp_patching_desc *desc = &bp_desc; 2100 2101 if (!raw_atomic_inc_not_zero(&desc->refs)) 2102 return NULL; 2103 2104 return desc; 2105 } 2106 2107 static __always_inline void put_desc(void) 2108 { 2109 struct bp_patching_desc *desc = &bp_desc; 2110 2111 smp_mb__before_atomic(); 2112 raw_atomic_dec(&desc->refs); 2113 } 2114 2115 static __always_inline void *text_poke_addr(struct text_poke_loc *tp) 2116 { 2117 return _stext + tp->rel_addr; 2118 } 2119 2120 static __always_inline int patch_cmp(const void *key, const void *elt) 2121 { 2122 struct text_poke_loc *tp = (struct text_poke_loc *) elt; 2123 2124 if (key < text_poke_addr(tp)) 2125 return -1; 2126 if (key > text_poke_addr(tp)) 2127 return 1; 2128 return 0; 2129 } 2130 2131 noinstr int poke_int3_handler(struct pt_regs *regs) 2132 { 2133 struct bp_patching_desc *desc; 2134 struct text_poke_loc *tp; 2135 int ret = 0; 2136 void *ip; 2137 2138 if (user_mode(regs)) 2139 return 0; 2140 2141 /* 2142 * Having observed our INT3 instruction, we now must observe 2143 * bp_desc with non-zero refcount: 2144 * 2145 * bp_desc.refs = 1 INT3 2146 * WMB RMB 2147 * write INT3 if (bp_desc.refs != 0) 2148 */ 2149 smp_rmb(); 2150 2151 desc = try_get_desc(); 2152 if (!desc) 2153 return 0; 2154 2155 /* 2156 * Discount the INT3. See text_poke_bp_batch(). 2157 */ 2158 ip = (void *) regs->ip - INT3_INSN_SIZE; 2159 2160 /* 2161 * Skip the binary search if there is a single member in the vector. 2162 */ 2163 if (unlikely(desc->nr_entries > 1)) { 2164 tp = __inline_bsearch(ip, desc->vec, desc->nr_entries, 2165 sizeof(struct text_poke_loc), 2166 patch_cmp); 2167 if (!tp) 2168 goto out_put; 2169 } else { 2170 tp = desc->vec; 2171 if (text_poke_addr(tp) != ip) 2172 goto out_put; 2173 } 2174 2175 ip += tp->len; 2176 2177 switch (tp->opcode) { 2178 case INT3_INSN_OPCODE: 2179 /* 2180 * Someone poked an explicit INT3, they'll want to handle it, 2181 * do not consume. 
noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc with non-zero refcount:
	 *
	 *	bp_desc.refs = 1		INT3
	 *	WMB				RMB
	 *	write INT3			if (bp_desc.refs != 0)
	 */
	smp_rmb();

	desc = try_get_desc();
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	ip += tp->len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case RET_INSN_OPCODE:
		int3_emulate_ret(regs);
		break;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->disp);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->disp);
		break;

	case 0x70 ... 0x7f: /* Jcc */
		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc();
	return ret;
}
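/*
 * Illustrative sketch, not part of the original file: how poke_int3_handler()
 * above resolves a relative branch target.  regs->ip points one byte past the
 * INT3, so the handler rewinds by INT3_INSN_SIZE to get the patched address
 * and then advances by tp->len, because relative displacements are taken from
 * the end of the instruction.  branch_target() is hypothetical.
 */
static unsigned long __maybe_unused branch_target(unsigned long patched_addr,
						  u8 len, s32 disp)
{
	/* Target of a relative CALL/JMP/Jcc: end of instruction plus disp. */
	return patched_addr + len + disp;
}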
#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp: vector of instructions to patch
 * @nr_entries: number of entries in the vector
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using the int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) with the first byte of the
 *		  replacing opcode
 *	- sync cores
 */
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	unsigned char int3 = INT3_INSN_OPCODE;
	unsigned int i;
	int do_sync;

	lockdep_assert_held(&text_mutex);

	bp_desc.vec = tp;
	bp_desc.nr_entries = nr_entries;

	/*
	 * Corresponds to the implicit memory barrier in try_get_desc() to
	 * ensure reading a non-zero refcount provides up to date bp_desc data.
	 */
	atomic_set_release(&bp_desc.refs, 1);

	/*
	 * Function tracing can enable thousands of places that need to be
	 * updated. This can take quite some time, and with full kernel debugging
	 * enabled, this could cause the softlockup watchdog to trigger.
	 * This function gets called after every 256 entries are added to be
	 * patched. Call cond_resched() here to make sure that other tasks can
	 * get scheduled while processing all the functions being patched.
	 */
	cond_resched();

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add an int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++) {
		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
	}

	text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
		u8 _new[POKE_MAX_OPCODE_SIZE+1];
		const u8 *new = tp[i].text;
		int len = tp[i].len;

		if (len - INT3_INSN_SIZE > 0) {
			memcpy(old + INT3_INSN_SIZE,
			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
			       len - INT3_INSN_SIZE);

			if (len == 6) {
				_new[0] = 0x0f;
				memcpy(_new + 1, new, 5);
				new = _new;
			}

			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
				  new + INT3_INSN_SIZE,
				  len - INT3_INSN_SIZE);

			do_sync++;
		}

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * executable code.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
	}

	if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		text_poke_sync();
	}

	/*
	 * Third step: replace the first byte (int3) with the first byte of the
	 * replacing opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 byte = tp[i].text[0];

		if (tp[i].len == 6)
			byte = 0x0f;

		if (byte == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
		do_sync++;
	}

	if (do_sync)
		text_poke_sync();

	/*
	 * Remove and wait for refs to be zero.
	 */
	if (!atomic_dec_and_test(&bp_desc.refs))
		atomic_cond_read_acquire(&bp_desc.refs, !VAL);
}
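/*
 * Illustrative sketch, not part of the original file: how the len == 6
 * (Jcc.d32) special case in text_poke_bp_batch() above reconstitutes the
 * full instruction.  text_poke_loc_init() below drops the leading 0x0f when
 * filling tp->text, so steps two and three add it back before writing.
 * rebuild_jcc_d32() and @out are hypothetical.
 */
static void __maybe_unused rebuild_jcc_d32(u8 out[6], const struct text_poke_loc *tp)
{
	out[0] = 0x0f;			/* two-byte opcode escape */
	memcpy(out + 1, tp->text, 5);	/* 0x80|cc + rel32, as stored below */
}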
static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;
	int ret, i = 0;

	if (len == 6)
		i = 1;
	memcpy((void *)tp->text, opcode+i, len-i);
	if (!emulate)
		emulate = opcode;

	ret = insn_decode_kernel(&insn, emulate);
	BUG_ON(ret < 0);

	tp->rel_addr = addr - (void *)_stext;
	tp->len = len;
	tp->opcode = insn.opcode.bytes[0];

	if (is_jcc32(&insn)) {
		/*
		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
		 */
		tp->opcode = insn.opcode.bytes[1] - 0x10;
	}

	switch (tp->opcode) {
	case RET_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		/*
		 * Control flow instructions without implied execution of the
		 * next instruction can be padded with INT3.
		 */
		for (i = insn.length; i < len; i++)
			BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
		break;

	default:
		BUG_ON(len != insn.length);
	}

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
	case RET_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
	case 0x70 ... 0x7f: /* Jcc */
		tp->disp = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->disp = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, x86_nops[len], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->disp = 0;
			break;

		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}

/*
 * We rely hard on tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;

	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}

static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}

void text_poke_finish(void)
{
	text_poke_flush(NULL);
}

void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr: address to patch
 * @opcode: opcode of new instruction
 * @len: length to copy
 * @emulate: instruction to be emulated
 *
 * Update a single instruction with the vector on the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}
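/*
 * Illustrative sketch, not part of the original file: the queue/finish
 * pattern built on the helpers above.  tp_order_fail() flushes early when
 * sites arrive out of order, so queueing in ascending address order keeps
 * the batches large.  patch_sites(), struct poke_site and @sites are
 * hypothetical; text_mutex, text_poke_queue() and text_poke_finish() are
 * the real interfaces defined above.
 */
struct poke_site {
	void *addr;
	const void *insn;
	size_t len;
};

static void __maybe_unused patch_sites(const struct poke_site *sites, int nr)
{
	int i;

	mutex_lock(&text_mutex);
	for (i = 0; i < nr; i++)
		text_poke_queue(sites[i].addr, sites[i].insn, sites[i].len, NULL);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}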