// SPDX-License-Identifier: GPL-2.0-or-later
/*   Kernel module help for x86.
     Copyright (C) 2001 Rusty Russell.

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>
#include <linux/stackprotector.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif
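
/*
 * For reference (illustrative note added here, not from the original
 * source): the handlers below implement the usual ELF relocation
 * formulas.  An absolute relocation (R_386_32, R_X86_64_64, ...) stores
 * S + A, where S is the symbol value and A the addend; a PC-relative one
 * (R_386_PC32, R_X86_64_PC32, ...) stores S + A - P, where P is the
 * address of the field being patched.  With made-up numbers: a 32-bit
 * call whose 4-byte operand field sits at P = 0xc1001000 and already
 * holds A = -4, against a symbol at S = 0xc1003000, is patched to
 * 0xc1003000 - 4 - 0xc1001000 = 0x1ffc; the CPU later adds that
 * displacement to the end of the instruction (0xc1001004) and lands on
 * the symbol.  R_X86_64_32S additionally requires the value to
 * sign-extend from 32 bits, which holds for kernel-range addresses such
 * as 0xffffffff81000000 (low half 0x81000000 sign-extends back to the
 * full value) but not for arbitrary 64-bit values -- hence the
 * "-mcmodel=kernel" hint printed on overflow.
 */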

#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved. */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
static int __write_relocate_add(Elf64_Shdr *sechdrs,
				const char *strtab,
				unsigned int symindex,
				unsigned int relsec,
				struct module *me,
				void *(*write)(void *dest, const void *src, size_t len),
				bool apply)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	u64 zero = 0ULL;

	DEBUGP("%s relocate section %u to %u\n",
	       apply ? "Applying" : "Clearing",
	       relsec, sechdrs[relsec].sh_info);

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		size_t size;

		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			continue;	/* nothing to write */
		case R_X86_64_64:
			size = 8;
			break;
		case R_X86_64_32:
			if (val != *(u32 *)&val)
				goto overflow;
			size = 4;
			break;
		case R_X86_64_32S:
			if ((s64)val != *(s32 *)&val)
				goto overflow;
			size = 4;
			break;
#if defined(CONFIG_STACKPROTECTOR) && \
    defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 170000
		case R_X86_64_REX_GOTPCRELX: {
			static unsigned long __percpu *const addr = &__stack_chk_guard;

			if (sym->st_value != (u64)addr) {
				pr_err("%s: Unsupported GOTPCREL relocation\n", me->name);
				return -ENOEXEC;
			}

			val = (u64)&addr + rel[i].r_addend;
			fallthrough;
		}
#endif
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			val -= (u64)loc;
			size = 4;
			break;
		case R_X86_64_PC64:
			val -= (u64)loc;
			size = 8;
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (apply) {
			if (memcmp(loc, &zero, size)) {
				pr_err("x86/modules: Invalid relocation target, existing value is nonzero for sec %u, idx %u, type %d, loc %lx, val %llx\n",
				       relsec, i, (int)ELF64_R_TYPE(rel[i].r_info),
				       (unsigned long)loc, val);
				return -ENOEXEC;
			}
			write(loc, &val, size);
		} else {
			if (memcmp(loc, &val, size)) {
				pr_warn("x86/modules: Invalid relocation target, existing value does not match expected value for sec %u, idx %u, type %d, loc %lx, val %llx\n",
					relsec, i, (int)ELF64_R_TYPE(rel[i].r_info),
					(unsigned long)loc, val);
				return -ENOEXEC;
			}
			write(loc, &zero, size);
		}
	}
	return 0;

overflow:
	pr_err("overflow in relocation type %d val %llx sec %u idx %d\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val, relsec, i);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
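
/*
 * Note (summary of the helper below, added here rather than taken from
 * the original source): write_relocate_add() chooses the write primitive
 * based on the module's state.  While the module is still
 * MODULE_STATE_UNFORMED its text is not yet live, so a plain memcpy() is
 * fine; once the module is live (e.g. when livepatch applies or clears
 * relocations against an already-loaded module), the write has to go
 * through text_poke() under text_mutex, followed by a sync of the other
 * CPUs.
 */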

static int write_relocate_add(Elf64_Shdr *sechdrs,
			      const char *strtab,
			      unsigned int symindex,
			      unsigned int relsec,
			      struct module *me,
			      bool apply)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __write_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write, apply);

	if (!early) {
		smp_text_poke_sync_each_cpu();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	return write_relocate_add(sechdrs, strtab, symindex, relsec, me, true);
}

#ifdef CONFIG_LIVEPATCH
void clear_relocate_add(Elf64_Shdr *sechdrs,
			const char *strtab,
			unsigned int symindex,
			unsigned int relsec,
			struct module *me)
{
	write_relocate_add(sechdrs, strtab, symindex, relsec, me, false);
}
#endif

#endif

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
		*orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
		*calls = NULL, *cfi = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
		if (!strcmp(".call_sites", secstrings + s->sh_name))
			calls = s;
		if (!strcmp(".cfi_sites", secstrings + s->sh_name))
			cfi = s;
		if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
			ibt_endbr = s;
	}

	its_init_mod(me);

	if (retpolines || cfi) {
		void *rseg = NULL, *cseg = NULL;
		unsigned int rsize = 0, csize = 0;

		if (retpolines) {
			rseg = (void *)retpolines->sh_addr;
			rsize = retpolines->sh_size;
		}

		if (cfi) {
			cseg = (void *)cfi->sh_addr;
			csize = cfi->sh_size;
		}

		apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize);
	}
	if (retpolines) {
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}

	its_fini_mod(me);

	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (calls) {
		struct callthunk_sites cs = {};

		cs.call_start = (void *)calls->sh_addr;
		cs.call_end = (void *)calls->sh_addr + calls->sh_size;

		callthunks_patch_module_calls(&cs, me);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (ibt_endbr) {
		void *iseg = (void *)ibt_endbr->sh_addr;
		apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size);
	}
	if (locks) {
		void *lseg = (void *)locks->sh_addr;
		void *text = me->mem[MOD_TEXT].base;
		void *text_end = text + me->mem[MOD_TEXT].size;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    text, text_end);
	}

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
	its_free_mod(mod);
}