// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <linux/stop_machine.h>

#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

#define ALT_CAP(a)		((a)->cpucap & ~ARM64_CB_BIT)
#define ALT_HAS_CB(a)		((a)->cpucap & ARM64_CB_BIT)

/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;

static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);

struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

bool alternative_is_applied(u16 cpucap)
{
	if (WARN_ON(cpucap >= ARM64_NCAPS))
		return false;

	return test_bit(cpucap, applied_alternatives);
}

/*
 * Check whether the branch target PC lies outside the replacement sequence;
 * if it does, the branch offset must be rewritten for the new location.
 */
static __always_inline bool branch_insn_requires_update(struct alt_instr *alt,
							unsigned long pc)
{
	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);

	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}

#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

static __always_inline u32 get_alt_insn(struct alt_instr *alt,
					__le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K-aligned addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC-relative
		 * literal addresses.
		 */
		BUG();
	}

	return insn;
}

static noinstr void patch_alternative(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr, int nr_inst)
{
	__le32 *replptr;
	int i;

	replptr = ALT_REPL_PTR(alt);
	for (i = 0; i < nr_inst; i++) {
		u32 insn;

		insn = get_alt_insn(alt, origptr + i, replptr + i);
		updptr[i] = cpu_to_le32(insn);
	}
}
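/*
 * Worked example (illustrative): suppose the replacement sequence contains
 * "b <target>" where <target> lies outside that sequence. The branch is
 * encoded with offset (target - altinsnptr); copied unchanged to the
 * original site it would jump to the wrong place, so get_alt_insn()
 * re-encodes it with offset (target - insnptr). Branches whose target stays
 * inside the replacement block are copied verbatim, since they remain
 * correct relative to the copied code. adrp gets the analogous fix-up,
 * computed on 4K-aligned bases.
 */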
/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static noinstr void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

	ctr_el0 = arm64_ftr_reg_ctrel0.sys_val;
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_EL0_DminLine_SHIFT);
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}

static int __apply_alternatives(const struct alt_region *region,
				bool is_module,
				unsigned long *cpucap_mask)
{
	struct alt_instr *alt;
	__le32 *origptr, *updptr;
	alternative_cb_t alt_cb;

	for (alt = region->begin; alt < region->end; alt++) {
		int nr_inst;
		int cap = ALT_CAP(alt);

		if (!test_bit(cap, cpucap_mask))
			continue;

		if (!cpus_have_cap(cap))
			continue;

		if (ALT_HAS_CB(alt))
			BUG_ON(alt->alt_len != 0);
		else
			BUG_ON(alt->alt_len != alt->orig_len);

		origptr = ALT_ORIG_PTR(alt);
		updptr = is_module ? origptr : lm_alias(origptr);
		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;

		if (ALT_HAS_CB(alt)) {
			alt_cb = ALT_REPL_PTR(alt);
			if (is_module && !core_kernel_text((unsigned long)alt_cb))
				return -ENOEXEC;
		} else {
			alt_cb = patch_alternative;
		}

		alt_cb(alt, origptr, updptr, nr_inst);

		if (!is_module) {
			clean_dcache_range_nopatch((u64)origptr,
						   (u64)(origptr + nr_inst));
		}
	}

	/*
	 * The core module code takes care of cache maintenance in
	 * flush_module_icache().
	 */
	if (!is_module) {
		dsb(ish);
		icache_inval_all_pou();
		isb();

		bitmap_or(applied_alternatives, applied_alternatives,
			  cpucap_mask, ARM64_NCAPS);
		bitmap_and(applied_alternatives, applied_alternatives,
			   system_cpucaps, ARM64_NCAPS);
	}

	return 0;
}

static void __init apply_alternatives_vdso(void)
{
	struct alt_region region;
	const struct elf64_hdr *hdr;
	const struct elf64_shdr *shdr;
	const struct elf64_shdr *alt;
	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);

	bitmap_fill(all_capabilities, ARM64_NCAPS);

	hdr = (struct elf64_hdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;

	region = (struct alt_region){
		.begin	= (void *)hdr + alt->sh_offset,
		.end	= (void *)hdr + alt->sh_offset + alt->sh_size,
	};

	__apply_alternatives(&region, false, &all_capabilities[0]);
}

static const struct alt_region kernel_alternatives __initconst = {
	.begin	= (struct alt_instr *)__alt_instructions,
	.end	= (struct alt_instr *)__alt_instructions_end,
};
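/*
 * Patching happens in two phases: apply_boot_alternatives() patches the
 * capabilities detected on the boot CPU early, before secondaries are up,
 * and apply_alternatives_all() later patches the remaining, system-wide
 * capabilities (plus the vDSO) under stop_machine(). In the latter case
 * CPU 0 does the rewriting while all other CPUs spin on
 * all_alternatives_applied, so no CPU executes instructions that are in
 * the middle of being rewritten.
 */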
/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __init __apply_alternatives_multi_stop(void *unused)
{
	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!all_alternatives_applied)
			cpu_relax();
		isb();
	} else {
		DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);

		bitmap_complement(remaining_capabilities, boot_cpucaps,
				  ARM64_NCAPS);

		BUG_ON(all_alternatives_applied);
		__apply_alternatives(&kernel_alternatives, false,
				     remaining_capabilities);
		/* Barriers provided by the cache flushing */
		all_alternatives_applied = 1;
	}

	return 0;
}

void __init apply_alternatives_all(void)
{
	pr_info("applying system-wide alternatives\n");

	apply_alternatives_vdso();
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
	/* If called on a non-boot CPU, things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	pr_info("applying boot alternatives\n");

	__apply_alternatives(&kernel_alternatives, false,
			     &boot_cpucaps[0]);
}

#ifdef CONFIG_MODULES
int apply_alternatives_module(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};
	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);

	bitmap_fill(all_capabilities, ARM64_NCAPS);

	return __apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif

noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
			       __le32 *updptr, int nr_inst)
{
	for (int i = 0; i < nr_inst; i++)
		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}
EXPORT_SYMBOL(alt_cb_patch_nops);
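/*
 * alt_cb_patch_nops() replaces all original instructions with NOPs. It is
 * exported so that modules can contain callback alternatives referring to
 * it: the callback itself lives in core kernel text and therefore passes
 * the core_kernel_text() check in __apply_alternatives(). A usage sketch,
 * assuming the three-argument ALTERNATIVE_CB(oldinstr, cpucap, cb) form and
 * a placeholder capability name (ARM64_SOME_CAP is not a real cpucap):
 *
 *	asm(ALTERNATIVE_CB("b	1f\n", ARM64_SOME_CAP, alt_cb_patch_nops)
 *	    "1:");
 *
 * When the capability is detected, the branch is overwritten with a NOP at
 * patch time; otherwise the original instruction is left in place.
 */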