// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "Modules: " fmt

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/random.h>
#include <linux/scs.h>

#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

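/*
 * MOVW group relocations: each R_AARCH64_MOVW_* relocation patches the
 * 16-bit immediate of a single MOVZ/MOVK/MOVN instruction. A full 64-bit
 * value is normally materialized by a MOVZ followed by up to three MOVKs,
 * with each instruction carrying one 16-bit slice of the value; the G0..G3
 * groups select the slice via the 'lsb' argument below. For the signed
 * (SABS and PREL) groups, AARCH64_INSN_IMM_MOVNZ additionally lets us flip
 * the instruction between MOVZ and MOVN depending on the sign of the value.
 */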
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

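/*
 * R_AARCH64_ADR_PREL_PG_HI21 normally patches an ADRP instruction. Locations
 * flagged by is_forbidden_offset_for_adrp() (used for the Cortex-A53 erratum
 * 843419 workaround) must not hold an ADRP, so we first try to rewrite the
 * instruction as a plain ADR (clearing bit 31 switches ADRP to ADR) when the
 * page address is within ADR's +/-1 MiB reach, and otherwise replace it with
 * a branch to a veneer that performs the ADRP from a safe offset.
 */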
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;

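		/*
		 * B and BL encode a 26-bit, 4-byte-scaled offset, i.e. a
		 * +/-128 MiB range. If the target is out of range, fall back
		 * to a PLT entry inside the module (emitted by
		 * module_emit_plt_entry()) and branch to that instead.
		 */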
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
			if (ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

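/*
 * With CONFIG_DYNAMIC_FTRACE, a module may be loaded out of direct-branch
 * range of the kernel's ftrace entry code, so each module carries a
 * .text.ftrace_trampoline section with reserved PLT slots. Initialize the
 * slot at FTRACE_PLT_IDX to branch to FTRACE_ADDR so that patched call
 * sites in the module can reach ftrace through it.
 */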
static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	int ret;

	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

	if (scs_is_dynamic()) {
		s = find_section(hdr, sechdrs, ".init.eh_frame");
		if (s) {
			ret = __pi_scs_patch((void *)s->sh_addr, s->sh_size);
			if (ret)
				pr_err("module %s: error occurred during dynamic SCS patching (%d)\n",
				       me->name, ret);
		}
	}

	return module_init_ftrace_plt(hdr, sechdrs, me);
}