// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel module help for x86.
   Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>
#include <linux/execmem.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

/* Compile-time debug switch: the dead branch keeps format strings checked. */
#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

static struct execmem_info execmem_info __ro_after_init;

/*
 * Describe the executable-memory range used for module allocations:
 * [MODULES_VADDR, MODULES_END), with the base offset randomized by up to
 * 1024 pages when KASLR is enabled.  Returns a pointer to the static
 * execmem_info filled in here.
 */
struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long start, offset = 0;

	if (kaslr_enabled())
		offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;

	start = MODULES_VADDR + offset;

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.flags	= EXECMEM_KASAN_SHADOW,
				.start	= start,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = MODULE_ALIGN,
			},
		},
	};

	return &execmem_info;
}

#ifdef CONFIG_X86_32
/*
 * Apply the ELF REL relocations in section @relsec to a module being
 * loaded.  Only the 32-bit relocation types emitted for kernel modules
 * (absolute, PC-relative and PLT32-treated-as-PC32) are supported;
 * anything else fails the load with -ENOEXEC.
 */
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
/*
 * Worker for both applying and clearing RELA relocations.
 *
 * @write: how to store into module text — plain memcpy for not-yet-live
 *         modules, text_poke for text that is already executable.
 * @apply: true to write the computed value (target must currently be
 *         zero), false to restore zero (target must currently hold the
 *         expected value).  Either mismatch is reported and fails with
 *         -ENOEXEC, catching double-application / stale relocations.
 */
static int __write_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len),
		   bool apply)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	u64 zero = 0ULL;

	DEBUGP("%s relocate section %u to %u\n",
	       apply ? "Applying" : "Clearing",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		size_t size;

		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			continue;	/* nothing to write */
		case R_X86_64_64:
			size = 8;
			break;
		case R_X86_64_32:
			/* Must fit in 32 bits zero-extended. */
			if (val != *(u32 *)&val)
				goto overflow;
			size = 4;
			break;
		case R_X86_64_32S:
			/* Must fit in 32 bits sign-extended. */
			if ((s64)val != *(s32 *)&val)
				goto overflow;
			size = 4;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			val -= (u64)loc;
			size = 4;
			break;
		case R_X86_64_PC64:
			val -= (u64)loc;
			size = 8;
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (apply) {
			if (memcmp(loc, &zero, size)) {
				pr_err("x86/modules: Invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
				       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
				return -ENOEXEC;
			}
			write(loc, &val, size);
		} else {
			if (memcmp(loc, &val, size)) {
				pr_warn("x86/modules: Invalid relocation target, existing value does not match expected value for type %d, loc %p, val %Lx\n",
					(int)ELF64_R_TYPE(rel[i].r_info), loc, val);
				return -ENOEXEC;
			}
			write(loc, &zero, size);
		}
	}
	return 0;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

/*
 * Choose the write primitive for relocations: an unformed module's text
 * is not yet mapped executable, so plain memcpy is fine; otherwise the
 * text is live and must go through text_poke under text_mutex, with a
 * text_poke_sync afterwards.
 */
static int write_relocate_add(Elf64_Shdr *sechdrs,
			      const char *strtab,
			      unsigned int symindex,
			      unsigned int relsec,
			      struct module *me,
			      bool apply)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __write_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write, apply);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

/* Apply the RELA relocations in section @relsec to module @me. */
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	return write_relocate_add(sechdrs, strtab, symindex, relsec, me, true);
}

#ifdef CONFIG_LIVEPATCH
/* Undo previously applied relocations, restoring the targets to zero. */
void clear_relocate_add(Elf64_Shdr *sechdrs,
			const char *strtab,
			unsigned int symindex,
			unsigned int relsec,
			struct module *me)
{
	write_relocate_add(sechdrs, strtab, symindex, relsec, me, false);
}
#endif

#endif

/*
 * Final arch-specific fixups after a module's sections are loaded and
 * relocated: locate the well-known patching sections by name, then apply
 * FineIBT/CFI, retpoline, return-thunk and alternative rewrites, patch
 * call thunks, seal ENDBR sites, register SMP lock alternatives and hand
 * the ORC tables to the unwinder.  The patching order mirrors the boot
 * sequence for the core kernel.  Returns 0.
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
		*orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
		*calls = NULL, *cfi = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
		if (!strcmp(".call_sites", secstrings + s->sh_name))
			calls = s;
		if (!strcmp(".cfi_sites", secstrings + s->sh_name))
			cfi = s;
		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
			ibt_endbr = s;
	}

	if (retpolines || cfi) {
		void *rseg = NULL, *cseg = NULL;
		unsigned int rsize = 0, csize = 0;

		if (retpolines) {
			rseg = (void *)retpolines->sh_addr;
			rsize = retpolines->sh_size;
		}

		if (cfi) {
			cseg = (void *)cfi->sh_addr;
			csize = cfi->sh_size;
		}

		apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize);
	}
	if (retpolines) {
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}
	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (calls || alt) {
		struct callthunk_sites cs = {};

		if (calls) {
			cs.call_start = (void *)calls->sh_addr;
			cs.call_end = (void *)calls->sh_addr + calls->sh_size;
		}

		if (alt) {
			cs.alt_start = (void *)alt->sh_addr;
			cs.alt_end = (void *)alt->sh_addr + alt->sh_size;
		}

		callthunks_patch_module_calls(&cs, me);
	}
	if (ibt_endbr) {
		void *iseg = (void *)ibt_endbr->sh_addr;
		apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size);
	}
	if (locks) {
		void *lseg = (void *)locks->sh_addr;
		void *text = me->mem[MOD_TEXT].base;
		void *text_end = text + me->mem[MOD_TEXT].size;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    text, text_end);
	}

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

/* Unregister the module's SMP lock alternatives on unload. */
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}