#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
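
/*
 * With the layout described above, xxx_nops[n] points at the start of the
 * n-byte NOP.  For example, intel_nops[3] == intelnops + 1 + 2, i.e. the
 * GENERIC_NOP3 bytes, and index ASM_NOP_MAX + 1 holds the 5-byte atomic NOP.
 */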
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
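
/*
 * Select the NOP table best suited to the boot CPU: the 0F 1F based P6 NOPs
 * where the CPU decodes them well, otherwise the K8/K7/generic variants.
 * The chosen table is then used by add_nops() below when padding patched code.
 */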
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}
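
/*
 * For reference, alternative sites are typically emitted by the
 * ALTERNATIVE() macro family, roughly:
 *
 *	asm volatile (ALTERNATIVE("old insns", "new insns", X86_FEATURE_XYZ));
 *
 * Each use records a struct alt_instr entry (relative offsets to the
 * original and replacement code, the feature bit, lengths and padding) in
 * the .altinstructions section, which apply_alternatives() below consumes.
 * X86_FEATURE_XYZ is only a placeholder here.
 */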

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
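
/*
 * UP/SMP LOCK prefix patching: __smp_locks (and the per-module equivalents)
 * hold self-relative 32-bit offsets to LOCK prefix bytes in the text.  On a
 * UP kernel those 0xf0 LOCK prefixes are rewritten to 0x3e DS segment
 * overrides, and put back if a second CPU ever comes up.
 */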
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

struct smp_alt_module {
	/* owning module, NULL for the core kernel */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	const s32 *locks;
	const s32 *locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}
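
/*
 * Switch a kernel that was patched for UP back to SMP-safe code: restore
 * the LOCK prefixes recorded in smp_alt_modules.
 */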
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
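
/*
 * Boot-time entry point for the patching above: apply the feature
 * alternatives, then the UP/SMP LOCK prefix handling, then the paravirt
 * patches, with NMIs suppressed while the text is rewritten.
 */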
void __init alternative_instructions(void)
{
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interrupts that might execute the code being patched.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other CPUs are not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	local_irq_restore(flags);
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery; but
	 * that causes hangs on some VIA CPUs.
	 */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery; but
	 * that causes hangs on some VIA CPUs.
	 */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
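
/*
 * What follows implements live patching via a temporary INT3 breakpoint:
 * text_poke_bp() installs an INT3 at the target, patches the remaining
 * bytes, then restores the first byte, with core synchronization in
 * between, while poke_int3_handler() redirects any CPU that hits the
 * temporary breakpoint to the caller-supplied handler.
 */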

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_patching_in_progress.
	 *
	 *	in_progress = TRUE		INT3
	 *	WMB				RMB
	 *	write INT3			if (in_progress)
	 *
	 * Idem for bp_int3_handler.
	 */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr: address to patch
 * @opcode: opcode of new instruction
 * @len: length to copy
 * @handler: address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) with the first byte of the
 *	  replacing opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * in_progress and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);
	/*
	 * sync_core() implies an smp_mb() and orders this store against
	 * the writing of the new instruction.
	 */
	bp_patching_in_progress = false;

	return addr;
}
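
/*
 * Illustrative use of text_poke_bp() (hypothetical addresses, not taken
 * from this file): turning a 5-byte NOP at addr into a near JMP to target
 * while other CPUs may be executing the site.  Passing addr + 5 as the
 * handler makes any CPU that hits the temporary INT3 behave as if the old
 * NOP were still in place:
 *
 *	u8 jmp[5] = { 0xe9, 0, 0, 0, 0 };
 *
 *	*(s32 *)&jmp[1] = (s32)(target - (addr + 5));
 *	text_poke_bp(addr, jmp, 5, addr + 5);
 */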