// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>
#include <asm/inst.h>

/*
 * One entry in a feature-fixup section. All offsets are relative to the
 * entry itself and are converted to addresses by calc_addr() below.
 */
struct fixup_entry {
	unsigned long	mask;		/* feature bits the entry tests under */
	unsigned long	value;		/* required value of (features & mask) */
	long		start_off;	/* offset to start of patchable code */
	long		end_off;	/* offset to end of patchable code */
	long		alt_start_off;	/* offset to start of alternative code */
	long		alt_end_off;	/* offset to end of alternative code */
};

static u32 *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (u32 *)((unsigned long)fcur + offset);
}

/*
 * Copy one instruction from the alternative section (src) into its final
 * location (dest). A relative branch whose target lies outside the
 * alternative section is re-translated so it still reaches the same
 * target from its new location. Returns 0 on success, 1 on failure.
 */
static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
	int err;
	struct ppc_inst instr;

	instr = ppc_inst_read(src);

	if (instr_is_relative_branch(ppc_inst_read(src))) {
		u32 *target = (u32 *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target > alt_end) {
			err = translate_branch(&instr, dest, src);
			if (err)
				return 1;
		}
	}

	raw_patch_instruction(dest, instr);

	return 0;
}

/*
 * Apply a single fixup entry: if the CPU/firmware feature bits do NOT
 * match the entry, replace the code between start/end with the
 * alternative section, padding any remainder with nops. Returns 0 on
 * success (or nothing to do), 1 if the alternative doesn't fit or an
 * instruction couldn't be patched.
 */
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
	u32 *start, *end, *alt_start, *alt_end, *src, *dest;

	start = calc_addr(fcur, fcur->start_off);
	end = calc_addr(fcur, fcur->end_off);
	alt_start = calc_addr(fcur, fcur->alt_start_off);
	alt_end = calc_addr(fcur, fcur->alt_end_off);

	/* The alternative must not be larger than the original */
	if ((alt_end - alt_start) > (end - start))
		return 1;

	/* Features match: leave the original code in place */
	if ((value & fcur->mask) == fcur->value)
		return 0;

	src = alt_start;
	dest = start;

	/* ppc_inst_next() steps by instruction, handling prefixed insns */
	for (; src < alt_end; src = ppc_inst_next(src, src),
			      dest = ppc_inst_next(dest, dest)) {
		if (patch_alt_instruction(src, dest, alt_start, alt_end))
			return 1;
	}

	/* Nop out any leftover space in the original section */
	for (; dest < end; dest++)
		raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP()));

	return 0;
}

/*
 * Walk a table of fixup_entry structs and apply each one against the
 * given feature-bit value.
 */
void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur, *fend;

	fcur = fixup_start;
	fend = fixup_end;

	for (; fcur < fend; fcur++) {
		if (patch_feature_section(value, fcur)) {
			WARN_ON(1);
			printk("Unable to patch feature section at %p - %p" \
				" with %p - %p\n",
				calc_addr(fcur, fcur->start_off),
				calc_addr(fcur, fcur->end_off),
				calc_addr(fcur, fcur->alt_start_off),
				calc_addr(fcur, fcur->alt_end_off));
		}
	}
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Patch the kernel-entry store-forwarding barrier sites with the
 * requested barrier flavour (fallback call / eieio / sync+ori), or nops
 * for STF_BARRIER_NONE. Runs under stop_machine() via
 * __do_stf_barrier_fixups().
 */
static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup);
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		instrs[i++] = PPC_RAW_MFLR(_R10);
		instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
		instrs[i++] = PPC_RAW_MTLR(_R10);
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = PPC_RAW_SYNC();
		instrs[i++] = PPC_RAW_LD(_R10, _R13, 0);
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		/* Each table slot holds a relative offset to the patch site */
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		// See comment in __do_entry_flush_fixups() RE order of patching
		if (types & STF_BARRIER_FALLBACK) {
			patch_instruction(dest, ppc_inst(instrs[0]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_branch(dest + 1,
				     (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
		} else {
			patch_instruction(dest + 1, ppc_inst(instrs[1]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_instruction(dest, ppc_inst(instrs[0]));
		}
	}

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                ? "no" :
		(types == STF_BARRIER_FALLBACK)            ? "fallback" :
		(types == STF_BARRIER_EIEIO)               ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))          ? "hwsync"
							   : "unknown");
}

/*
 * Patch the kernel-exit store-forwarding barrier sites. The fallback
 * and sync_ori flavours need six instructions (they stash and restore
 * r13 via SPRGs, HV or guest variants depending on CPU_FTR_HVMODE);
 * eieio needs one. Also runs under stop_machine().
 */
static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup);
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();
	instrs[3] = PPC_RAW_NOP();
	instrs[4] = PPC_RAW_NOP();
	instrs[5] = PPC_RAW_NOP();

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = PPC_RAW_MTSPR(SPRN_HSPRG1, _R13);
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG0);
		} else {
			instrs[i++] = PPC_RAW_MTSPR(SPRN_SPRG2, _R13);
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG1);
		}
		instrs[i++] = PPC_RAW_SYNC();
		instrs[i++] = PPC_RAW_LD(_R13, _R13, 0);
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE))
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG1);
		else
			instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG2);
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, ppc_inst(instrs[0]));
		patch_instruction(dest + 1, ppc_inst(instrs[1]));
		patch_instruction(dest + 2, ppc_inst(instrs[2]));
		patch_instruction(dest + 3, ppc_inst(instrs[3]));
		patch_instruction(dest + 4, ppc_inst(instrs[4]));
		patch_instruction(dest + 5, ppc_inst(instrs[5]));
	}
	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE)                ? "no" :
		(types == STF_BARRIER_FALLBACK)            ? "fallback" :
		(types == STF_BARRIER_EIEIO)               ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI))          ? "hwsync"
							   : "unknown");
}

/* stop_machine() callback: patch entry and exit STF barriers together */
static int __do_stf_barrier_fixups(void *data)
{
	enum stf_barrier_type *types = data;

	do_stf_entry_barrier_fixups(*types);
	do_stf_exit_barrier_fixups(*types);

	return 0;
}

void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	/*
	 * The call to the fallback entry flush, and the fallback/sync-ori exit
	 * flush can not be safely patched in/out while other CPUs are executing
	 * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
	 * spin in the stop machine core with interrupts hard disabled.
	 */
	stop_machine(__do_stf_barrier_fixups, &types, NULL);
}

/*
 * Patch the uaccess (kernel->user access) L1D flush sites with the
 * selected flush type. For L1D_FLUSH_FALLBACK the trailing blr is
 * nopped out so execution falls through into the fallback flush code.
 */
void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[4], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___uaccess_flush_fixup);
	end = PTRRELOC(&__stop___uaccess_flush_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();
	instrs[3] = PPC_RAW_BLR();

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[3] = PPC_RAW_NOP();
		/* fallthrough to fallback flush */
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, ppc_inst(instrs[0]));

		patch_instruction(dest + 1, ppc_inst(instrs[1]));
		patch_instruction(dest + 2, ppc_inst(instrs[2]));
		patch_instruction(dest + 3, ppc_inst(instrs[3]));
	}

	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

/*
 * stop_machine() callback: patch the kernel-entry (and scv-entry) L1D
 * flush sites. See the comment below on the required patching order.
 */
static int __do_entry_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	i = 0;
	if (types == L1D_FLUSH_FALLBACK) {
		instrs[i++] = PPC_RAW_MFLR(_R10);
		instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
		instrs[i++] = PPC_RAW_MTLR(_R10);
	}

	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	/*
	 * If we're patching in or out the fallback flush we need to be careful about the
	 * order in which we patch instructions. That's because it's possible we could
	 * take a page fault after patching one instruction, so the sequence of
	 * instructions must be safe even in a half patched state.
	 *
	 * To make that work, when patching in the fallback flush we patch in this order:
	 * - the mflr		(dest)
	 * - the mtlr		(dest + 2)
	 * - the branch		(dest + 1)
	 *
	 * That ensures the sequence is safe to execute at any point. In contrast if we
	 * patch the mtlr last, it's possible we could return from the branch and not
	 * restore LR, leading to a crash later.
	 *
	 * When patching out the fallback flush (either with nops or another flush type),
	 * we patch in this order:
	 * - the branch		(dest + 1)
	 * - the mtlr		(dest + 2)
	 * - the mflr		(dest)
	 *
	 * Note we are protected by stop_machine() from other CPUs executing the code in a
	 * semi-patched state.
	 */

	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);
	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		if (types == L1D_FLUSH_FALLBACK) {
			patch_instruction(dest, ppc_inst(instrs[0]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_branch(dest + 1,
				     (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
		} else {
			patch_instruction(dest + 1, ppc_inst(instrs[1]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_instruction(dest, ppc_inst(instrs[0]));
		}
	}

	/* Second table: scv entry sites; keep accumulating i for the printk */
	start = PTRRELOC(&__start___scv_entry_flush_fixup);
	end = PTRRELOC(&__stop___scv_entry_flush_fixup);
	for (; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		if (types == L1D_FLUSH_FALLBACK) {
			patch_instruction(dest, ppc_inst(instrs[0]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_branch(dest + 1,
				     (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
		} else {
			patch_instruction(dest + 1, ppc_inst(instrs[1]));
			patch_instruction(dest + 2, ppc_inst(instrs[2]));
			patch_instruction(dest, ppc_inst(instrs[0]));
		}
	}


	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");

	return 0;
}

void do_entry_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * The call to the fallback flush can not be safely patched in/out while
	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
	 * CPU while all other CPUs spin in the stop machine core with interrupts
	 * hard disabled.
	 */
	stop_machine(__do_entry_flush_fixups, &types, NULL);
}

/*
 * Patch the RFI (return-to-user) L1D flush sites. The fallback flush is
 * reached with a "b .+16" in slot 0 rather than a subroutine call.
 */
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___rfi_flush_fixup);
	end = PTRRELOC(&__stop___rfi_flush_fixup);

	instrs[0] = PPC_RAW_NOP();
	instrs[1] = PPC_RAW_NOP();
	instrs[2] = PPC_RAW_NOP();

	if (types & L1D_FLUSH_FALLBACK)
		/* b .+16 to fallback flush */
		instrs[0] = PPC_INST_BRANCH | 16;

	i = 0;
	if (types & L1D_FLUSH_ORI) {
		instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
		instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
	}

	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, ppc_inst(instrs[0]));
		patch_instruction(dest + 1, ppc_inst(instrs[1]));
		patch_instruction(dest + 2, ppc_inst(instrs[2]));
	}

	printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
		(types == L1D_FLUSH_NONE)       ? "no" :
		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
							? "ori+mttrig type"
							: "ori type" :
		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
						: "unknown");
}

/*
 * Book3S-64 variant: patch each barrier_nospec site with either a nop
 * (disabled) or the single-instruction ori speculation barrier.
 */
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr, *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr = PPC_RAW_NOP();

	if (enable) {
		pr_info("barrier-nospec: using ORI speculation barrier\n");
		instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, ppc_inst(instr));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BARRIER_NOSPEC
/* Apply the nospec barrier fixups to the kernel's own fixup table */
void do_barrier_nospec_fixups(bool enable)
{
	void *start, *end;

	start = PTRRELOC(&__start___barrier_nospec_fixup);
	end = PTRRELOC(&__stop___barrier_nospec_fixup);

	do_barrier_nospec_fixups_range(enable, start, end);
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * FSL Book3E variant: the speculation barrier here is a two-instruction
 * isync; sync sequence, so two slots are patched per site.
 */
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
	unsigned int instr[2], *dest;
	long *start, *end;
	int i;

	start = fixup_start;
	end = fixup_end;

	instr[0] = PPC_RAW_NOP();
	instr[1] = PPC_RAW_NOP();

	if (enable) {
		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
		instr[0] = PPC_RAW_ISYNC();
		instr[1] = PPC_RAW_SYNC();
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);
		patch_instruction(dest, ppc_inst(instr[0]));
		patch_instruction(dest + 1, ppc_inst(instr[1]));
	}

	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}

/*
 * Nop out one BTB flush section. curr[0] and curr[1] hold relative
 * offsets to the start and end of the section respectively.
 */
static void patch_btb_flush_section(long *curr)
{
	unsigned int *start, *end;

	start = (void *)curr + *curr;
	end = (void *)curr + *(curr + 1);
	for (; start < end; start++) {
		pr_devel("patching dest %lx\n", (unsigned long)start);
		patch_instruction(start, ppc_inst(PPC_RAW_NOP()));
	}
}

/* Walk the btb_flush fixup table; entries are (start, end) offset pairs */
void do_btb_flush_fixups(void)
{
	long *start, *end;

	start = PTRRELOC(&__start__btb_flush_fixup);
	end = PTRRELOC(&__stop__btb_flush_fixup);

	for (; start < end; start += 2)
		patch_btb_flush_section(start);
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

/*
 * On CPUs with CPU_FTR_LWSYNC, rewrite each marked sync site to the
 * (lighter) lwsync instruction; otherwise leave the code untouched.
 */
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
	long *start, *end;
	u32 *dest;

	if (!(value & CPU_FTR_LWSYNC))
		return;

	start = fixup_start;
	end = fixup_end;

	for (; start < end; start++) {
		dest = (void *)start + *start;
		raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC));
	}
}

/*
 * For relocatable PPC64 kernels not running at address 0, copy the
 * already-fixed-up low-memory interrupt/exception text (from _stext to
 * __end_interrupts) from the running kernel down to KERNELBASE.
 */
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	struct ppc_inst inst;
	u32 *src, *dest, *end;

	if (PHYSICAL_START == 0)
		return;

	src = (u32 *)(KERNELBASE + PHYSICAL_START);
	dest = (u32 *)KERNELBASE;
	end = (void *)src + (__end_interrupts - _stext);

	while (src < end) {
		inst = ppc_inst_read(src);
		raw_patch_instruction(dest, inst);
		src = ppc_inst_next(src, src);
		dest = ppc_inst_next(dest, dest);
	}
#endif
}

/* Feature values captured at patch time, re-checked by check_features() */
static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif

void __init apply_feature_fixups(void)
{
	/*
	 * NOTE(review): the PTRRELOC() wrapping suggests this can run
	 * before the kernel is running at its linked address — confirm
	 * against the early-boot callers.
	 */
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}

void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}

/* Late sanity check: warn if feature bits changed after text patching */
static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);

#ifdef CONFIG_FTR_FIXUP_SELFTEST

#define check(x)	\
	if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);

/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;

/* Offset of p relative to a fixup entry, as stored in the entry fields */
static long calc_offset(struct fixup_entry *entry, unsigned int *p)
{
	return (unsigned long)p - (unsigned long)entry;
}

/* Patch with no alternative section: the region should be nopped out */
static void test_basic_patching(void)
{
	extern unsigned int ftr_fixup_test1[];
	extern unsigned int end_ftr_fixup_test1[];
	extern unsigned int ftr_fixup_test1_orig[];
	extern unsigned int ftr_fixup_test1_expected[];
	int size = 4 * (end_ftr_fixup_test1 - ftr_fixup_test1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}

/* Patch with an alternative section replacing the original code */
static void test_alternative_patching(void)
{
	extern unsigned int ftr_fixup_test2[];
	extern unsigned int end_ftr_fixup_test2[];
	extern unsigned int ftr_fixup_test2_orig[];
	extern unsigned int ftr_fixup_test2_alt[];
	extern unsigned int ftr_fixup_test2_expected[];
	int size = 4 * (end_ftr_fixup_test2 - ftr_fixup_test2);

	fixup.value = fixup.mask = 0xF;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);

	/* Sanity check */
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}

/* An alternative bigger than the original must be rejected untouched */
static void test_alternative_case_too_big(void)
{
	extern unsigned int ftr_fixup_test3[];
	extern unsigned int end_ftr_fixup_test3[];
	extern unsigned int ftr_fixup_test3_orig[];
	extern unsigned int ftr_fixup_test3_alt[];
	int size = 4 * (end_ftr_fixup_test3 - ftr_fixup_test3);

	fixup.value = fixup.mask = 0xC;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);

	/* Expect nothing to be patched, and the error returned to us */
	check(patch_feature_section(0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(0, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
	check(patch_feature_section(~0xF, &fixup) == 1);
	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}

/* A smaller alternative is applied and the remainder nop-filled */
static void test_alternative_case_too_small(void)
{
	extern unsigned int ftr_fixup_test4[];
	extern unsigned int end_ftr_fixup_test4[];
	extern unsigned int ftr_fixup_test4_orig[];
	extern unsigned int ftr_fixup_test4_alt[];
	extern unsigned int ftr_fixup_test4_expected[];
	int size = 4 * (end_ftr_fixup_test4 - ftr_fixup_test4);
	unsigned long flag;

	/* Check a high-bit flag */
	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
	fixup.value = fixup.mask = flag;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);

	/* Sanity check */
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}

/* Boot-time fixup result checks: branches inside the alternative */
static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5[];
	extern unsigned int end_ftr_fixup_test5[];
	extern unsigned int ftr_fixup_test5_expected[];
	int size = 4 * (end_ftr_fixup_test5 - ftr_fixup_test5);

	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}

/* Branches out of the alternative must have been re-translated */
static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6[];
	extern unsigned int end_ftr_fixup_test6[];
	extern unsigned int ftr_fixup_test6_expected[];
	int size = 4 * (end_ftr_fixup_test6 - ftr_fixup_test6);

	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}

/* A branch targeting exactly the end of the alternative section */
static void test_alternative_case_with_branch_to_end(void)
{
	extern unsigned int ftr_fixup_test7[];
	extern unsigned int end_ftr_fixup_test7[];
	extern unsigned int ftr_fixup_test7_expected[];
	int size = 4 * (end_ftr_fixup_test7 - ftr_fixup_test7);

	check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}

/* Verify the BEGIN/END_FTR_SECTION assembler macros patched correctly */
static void test_cpu_macros(void)
{
	extern u8 ftr_fixup_test_FTR_macros[];
	extern u8 ftr_fixup_test_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FTR_macros_expected -
			     ftr_fixup_test_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FTR_macros,
		     ftr_fixup_test_FTR_macros_expected, size) == 0);
}

/* Same, for the firmware-feature (FW_FTR) macros (PPC64 only) */
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros[];
	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
	unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
			     ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

/* Verify lwsync sites match the variant for this CPU's LWSYNC feature */
static void test_lwsync_macros(void)
{
	extern u8 lwsync_fixup_test[];
	extern u8 end_lwsync_fixup_test[];
	extern u8 lwsync_fixup_test_expected_LWSYNC[];
	extern u8 lwsync_fixup_test_expected_SYNC[];
	unsigned long size = end_lwsync_fixup_test -
			     lwsync_fixup_test;

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}

#ifdef CONFIG_PPC64
/* Nop-out patching across a prefixed (8-byte) instruction */
static void __init test_prefix_patching(void)
{
	extern unsigned int ftr_fixup_prefix1[];
	extern unsigned int end_ftr_fixup_prefix1[];
	extern unsigned int ftr_fixup_prefix1_orig[];
	extern unsigned int ftr_fixup_prefix1_expected[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3);
	fixup.alt_start_off = fixup.alt_end_off = 0;

	/* Sanity check */
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0);
	check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0);
}

/* Alternative-section patching where the alternative is prefixed */
static void __init test_prefix_alt_patching(void)
{
	extern unsigned int ftr_fixup_prefix2[];
	extern unsigned int end_ftr_fixup_prefix2[];
	extern unsigned int ftr_fixup_prefix2_orig[];
	extern unsigned int ftr_fixup_prefix2_expected[];
	extern unsigned int ftr_fixup_prefix2_alt[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2);
	/* Sanity check */
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0);
	check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0);
}

/* Mixed word + prefixed alternative patching */
static void __init test_prefix_word_alt_patching(void)
{
	extern unsigned int ftr_fixup_prefix3[];
	extern unsigned int end_ftr_fixup_prefix3[];
	extern unsigned int ftr_fixup_prefix3_orig[];
	extern unsigned int ftr_fixup_prefix3_expected[];
	extern unsigned int ftr_fixup_prefix3_alt[];
	int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3);

	fixup.value = fixup.mask = 8;
	fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1);
	fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4);
	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt);
	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3);
	/* Sanity check */
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0);

	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0);
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0);
}
#else
static inline void test_prefix_patching(void) {}
static inline void test_prefix_alt_patching(void) {}
static inline void test_prefix_word_alt_patching(void) {}
#endif /* CONFIG_PPC64 */

static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_alternative_case_with_branch_to_end();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();
	test_prefix_patching();
	test_prefix_alt_patching();
	test_prefix_word_alt_patching();

	return 0;
}
late_initcall(test_feature_fixups);

#endif /* CONFIG_FTR_FIXUP_SELFTEST */