//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

namespace {
class ARM final : public TargetInfo {
public:
  ARM();
  uint32_t calcEFlags() const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void addPltSymbols(InputSection &isec, uint64_t off) const override;
  void addPltHeaderSymbols(InputSection &isd) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
};
} // namespace

ARM::ARM() {
  copyRel = R_ARM_COPY;
  relativeRel = R_ARM_RELATIVE;
  iRelativeRel = R_ARM_IRELATIVE;
  gotRel = R_ARM_GLOB_DAT;
  pltRel = R_ARM_JUMP_SLOT;
  symbolicRel = R_ARM_ABS32;
  tlsGotRel = R_ARM_TLS_TPOFF32;
  tlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
  tlsOffsetRel = R_ARM_TLS_DTPOFF32;
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
  trapInstr = {0xd4, 0xd4, 0xd4, 0xd4};
  needsThunks = true;
  defaultMaxPageSize = 65536;
}

uint32_t ARM::calcEFlags() const {
  // The ABIFloatType is used by loaders to detect the floating point calling
  // convention.
  uint32_t abiFloatType = 0;
  if (config->armVFPArgs == ARMVFPArgKind::Base ||
      config->armVFPArgs == ARMVFPArgKind::Default)
    abiFloatType = EF_ARM_ABI_FLOAT_SOFT;
  else if (config->armVFPArgs == ARMVFPArgKind::VFP)
    abiFloatType = EF_ARM_ABI_FLOAT_HARD;

  // We don't currently use any features incompatible with EF_ARM_EABI_VER5,
  // but we don't have any firm guarantees of conformance. Linux AArch64
  // kernels (as of 2016) require an EABI version to be set.
  return EF_ARM_EABI_VER5 | abiFloatType;
}

RelExpr ARM::getRelExpr(RelType type, const Symbol &s,
                        const uint8_t *loc) const {
  switch (type) {
  case R_ARM_ABS32:
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
    return R_ABS;
  case R_ARM_THM_JUMP8:
  case R_ARM_THM_JUMP11:
    return R_PC;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return R_PLT_PC;
  case R_ARM_GOTOFF32:
    // (S + A) - GOT_ORG
    return R_GOTREL;
  case R_ARM_GOT_BREL:
    // GOT(S) + A - GOT_ORG
    return R_GOT_OFF;
  case R_ARM_GOT_PREL:
  case R_ARM_TLS_IE32:
    // GOT(S) + A - P
    return R_GOT_PC;
  case R_ARM_SBREL32:
    return R_ARM_SBREL;
  case R_ARM_TARGET1:
    return config->target1Rel ? R_PC : R_ABS;
  case R_ARM_TARGET2:
    if (config->target2 == Target2Policy::Rel)
      return R_PC;
    if (config->target2 == Target2Policy::Abs)
      return R_ABS;
    return R_GOT_PC;
  case R_ARM_TLS_GD32:
    return R_TLSGD_PC;
  case R_ARM_TLS_LDM32:
    return R_TLSLD_PC;
  case R_ARM_TLS_LDO32:
    return R_DTPREL;
  case R_ARM_BASE_PREL:
    // B(S) + A - P
    // FIXME: currently B(S) assumed to be .got, this may not hold for all
    // platforms.
    return R_GOTONLY_PC;
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
    return R_PC;
  case R_ARM_ALU_PC_G0:
  case R_ARM_ALU_PC_G0_NC:
  case R_ARM_ALU_PC_G1:
  case R_ARM_ALU_PC_G1_NC:
  case R_ARM_ALU_PC_G2:
  case R_ARM_LDR_PC_G0:
  case R_ARM_LDR_PC_G1:
  case R_ARM_LDR_PC_G2:
  case R_ARM_LDRS_PC_G0:
  case R_ARM_LDRS_PC_G1:
  case R_ARM_LDRS_PC_G2:
  case R_ARM_THM_ALU_PREL_11_0:
  case R_ARM_THM_PC8:
  case R_ARM_THM_PC12:
    return R_ARM_PCA;
  case R_ARM_MOVW_BREL_NC:
  case R_ARM_MOVW_BREL:
  case R_ARM_MOVT_BREL:
  case R_ARM_THM_MOVW_BREL_NC:
  case R_ARM_THM_MOVW_BREL:
  case R_ARM_THM_MOVT_BREL:
    return R_ARM_SBREL;
  case R_ARM_NONE:
    return R_NONE;
  case R_ARM_TLS_LE32:
    return R_TPREL;
  case R_ARM_V4BX:
    // V4BX is just a marker to indicate there's a "bx rN" instruction at the
    // given address. It can be used to implement a special linker mode which
    // rewrites ARMv4T inputs to ARMv4. Since we support only ARMv4 input and
    // not ARMv4 output, we can just ignore it.
    return R_NONE;
  default:
    error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
          ") against symbol " + toString(s));
    return R_NONE;
  }
}

RelType ARM::getDynRel(RelType type) const {
  if ((type == R_ARM_ABS32) || (type == R_ARM_TARGET1 && !config->target1Rel))
    return R_ARM_ABS32;
  return R_ARM_NONE;
}

void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const {
  write32le(buf, in.plt->getVA());
}

void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  // An ARM entry is the address of the ifunc resolver function.
  write32le(buf, s.getVA());
}

// Long form PLT Header that does not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltHeaderLong(uint8_t *buf) {
  const uint8_t pltData[] = {
      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
      0x00, 0x00, 0x00, 0x00, // L2: .word &(.got.plt) - L1 - 8
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4};
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t gotPlt = in.gotPlt->getVA();
  uint64_t l1 = in.plt->getVA() + 8;
  write32le(buf + 16, gotPlt - l1 - 8);
}

// The default PLT header requires the .got.plt to be within 128 MiB of the
// .plt in the positive direction.
void ARM::writePltHeader(uint8_t *buf) const {
  // Use a similar sequence to that in writePlt(); the difference is that the
  // calling convention means we use lr instead of ip. The PLT entry is
  // responsible for saving lr on the stack, the dynamic loader is responsible
  // for reloading it.
  const uint32_t pltData[] = {
      0xe52de004, // L1: str lr, [sp,#-4]!
      0xe28fe600, //     add lr, pc,  #0x0NN00000 &(.got.plt - L1 - 4)
      0xe28eea00, //     add lr, lr,  #0x000NN000 &(.got.plt - L1 - 4)
      0xe5bef000, //     ldr pc, [lr, #0x00000NNN] &(.got.plt - L1 - 4)
  };

  uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 4;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the Offset, use the long form.
    writePltHeaderLong(buf);
    return;
  }
  write32le(buf + 0, pltData[0]);
  write32le(buf + 4, pltData[1] | ((offset >> 20) & 0xff));
  write32le(buf + 8, pltData[2] | ((offset >> 12) & 0xff));
  write32le(buf + 12, pltData[3] | (offset & 0xfff));
  memcpy(buf + 16, trapInstr.data(), 4); // Pad to 32-byte boundary
  memcpy(buf + 20, trapInstr.data(), 4);
  memcpy(buf + 24, trapInstr.data(), 4);
  memcpy(buf + 28, trapInstr.data(), 4);
}

void ARM::addPltHeaderSymbols(InputSection &isec) const {
  addSyntheticLocal("$a", STT_NOTYPE, 0, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, 16, 0, isec);
}

// Long form PLT entries that do not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltLong(uint8_t *buf, uint64_t gotPltEntryAddr,
                         uint64_t pltEntryAddr) {
  const uint8_t pltData[] = {
      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
      0x00, 0x00, 0x00, 0x00, // L2: .word Offset(&(.got.plt) - L1 - 8
  };
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t l1 = pltEntryAddr + 4;
  write32le(buf + 12, gotPltEntryAddr - l1 - 8);
}

// The default PLT entries require the .got.plt to be within 128 MiB of the
// .plt in the positive direction.
void ARM::writePlt(uint8_t *buf, const Symbol &sym,
                   uint64_t pltEntryAddr) const {
  // The PLT entry is similar to the example given in Appendix A of ELF for
  // the Arm Architecture. Instead of using the Group Relocations to find the
  // optimal rotation for the 8-bit immediate used in the add instructions we
  // hard code the most compact rotations for simplicity. This saves a load
  // instruction over the long plt sequences.
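  // Note on the hard-coded rotations below: the two add instructions use
  // modified-immediate rotate fields 6 and 10, placing their 8-bit immediates
  // at offset bits [27:20] and [19:12], and the ldr supplies bits [11:0], so
  // together they cover the 128 MiB (isUInt<27>) range checked below.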
  const uint32_t pltData[] = {
      0xe28fc600, // L1: add ip, pc,  #0x0NN00000  Offset(&(.got.plt) - L1 - 8
      0xe28cca00, //     add ip, ip,  #0x000NN000  Offset(&(.got.plt) - L1 - 8
      0xe5bcf000, //     ldr pc, [ip, #0x00000NNN] Offset(&(.got.plt) - L1 - 8
  };

  uint64_t offset = sym.getGotPltVA() - pltEntryAddr - 8;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the Offset, use the long form.
    writePltLong(buf, sym.getGotPltVA(), pltEntryAddr);
    return;
  }
  write32le(buf + 0, pltData[0] | ((offset >> 20) & 0xff));
  write32le(buf + 4, pltData[1] | ((offset >> 12) & 0xff));
  write32le(buf + 8, pltData[2] | (offset & 0xfff));
  memcpy(buf + 12, trapInstr.data(), 4); // Pad to 16-byte boundary
}

void ARM::addPltSymbols(InputSection &isec, uint64_t off) const {
  addSyntheticLocal("$a", STT_NOTYPE, off, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, off + 12, 0, isec);
}

bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                     uint64_t branchAddr, const Symbol &s,
                     int64_t a) const {
  // If s is an undefined weak symbol and does not have a PLT entry then it
  // will be resolved as a branch to the next instruction. If it is hidden, its
  // binding has been converted to local, so we just check isUndefined() here.
  // An undefined non-weak symbol will already have been reported as an error.
  if (s.isUndefined() && !s.isInPlt())
    return false;
  // A state change from ARM to Thumb and vice versa must go through an
  // interworking thunk if the relocation type is not R_ARM_CALL or
  // R_ARM_THM_CALL.
  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
    // Source is ARM, all PLT entries are ARM so no interworking required.
    // Otherwise we need to interwork if the STT_FUNC Symbol has bit 0 set
    // (Thumb).
    if (s.isFunc() && expr == R_PC && (s.getVA() & 1))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst + a);
  }
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    // Source is Thumb, all PLT entries are ARM so interworking is required.
    // Otherwise we need to interwork if the STT_FUNC Symbol has bit 0 clear
    // (ARM).
    if (expr == R_PLT_PC || (s.isFunc() && (s.getVA() & 1) == 0))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_THM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst + a);
  }
  }
  return false;
}

uint32_t ARM::getThunkSectionSpacing() const {
  // The placing of pre-created ThunkSections is controlled by the value
  // thunkSectionSpacing returned by getThunkSectionSpacing(). The aim is to
  // place the ThunkSection such that all branches from the InputSections
  // prior to the ThunkSection can reach a Thunk placed at the end of the
  // ThunkSection. Graphically:
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |

  // Pre-created ThunkSections are spaced roughly 16MiB apart on ARMv7. This
  // is to match the most common expected case of a Thumb 2 encoded BL, BLX or
  // B.W:
  //     ARM B, BL, BLX range +/- 32MiB
  //     Thumb B.W, BL, BLX range +/- 16MiB
  //     Thumb B<cc>.W range +/- 1MiB
  // If a branch cannot reach a pre-created ThunkSection a new one will be
  // created so we can handle the rare cases of a Thumb 2 conditional branch.
  // We intentionally use a lower size for thunkSectionSpacing than the maximum
  // branch range so the end of the ThunkSection is more likely to be within
  // range of the branch instruction that is furthest away. The value we
  // shorten thunkSectionSpacing by is set conservatively to allow us to create
  // 16,384 12 byte Thunks at any offset in a ThunkSection without risk of a
  // branch to one of the Thunks going out of range.

  // On Arm the thunkSectionSpacing depends on the range of the Thumb branch
  // instructions. On earlier architectures such as ARMv4, ARMv5 and ARMv6
  // (except ARMv6T2) the range is +/- 4MiB.

  return (config->armJ1J2BranchEncoding) ? 0x1000000 - 0x30000
                                         : 0x400000 - 0x7500;
}

bool ARM::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
  if ((dst & 0x1) == 0)
    // Destination is ARM, if ARM caller then Src is already 4-byte aligned.
    // If Thumb Caller (BLX) the Src address has bottom 2 bits cleared to
    // ensure destination will be 4 byte aligned.
    src &= ~0x3;
  else
    // Bit 0 == 1 denotes Thumb state, it is not part of the range.
    dst &= ~0x1;

  int64_t offset = dst - src;
  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
  case R_ARM_CALL:
    return llvm::isInt<26>(offset);
  case R_ARM_THM_JUMP19:
    return llvm::isInt<21>(offset);
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return config->armJ1J2BranchEncoding ? llvm::isInt<25>(offset)
                                         : llvm::isInt<23>(offset);
  default:
    return true;
  }
}

// Helper to produce message text when LLD detects that a CALL relocation to
// a non STT_FUNC symbol may result in incorrect interworking between ARM
// and Thumb.
static void stateChangeWarning(uint8_t *loc, RelType relt, const Symbol &s) {
  assert(!s.isFunc());
  const ErrorPlace place = getErrorPlace(loc);
  std::string hint;
  if (!place.srcLoc.empty())
    hint = "; " + place.srcLoc;
  if (s.isSection()) {
    // Section symbols must be defined and in a section. Users cannot change
    // the type. Use the section name as getName() returns an empty string.
    warn(place.loc + "branch and link relocation: " + toString(relt) +
         " to STT_SECTION symbol " + cast<Defined>(s).section->name +
         " ; interworking not performed" + hint);
  } else {
    // Warn with hint on how to alter the symbol type.
    warn(getErrorLocation(loc) + "branch and link relocation: " +
         toString(relt) + " to non STT_FUNC symbol: " + s.getName() +
         " interworking not performed; consider using directive '.type " +
         s.getName() +
         ", %function' to give symbol type STT_FUNC if interworking between "
         "ARM and Thumb is required" +
         hint);
  }
}

// Rotate a 32-bit unsigned value right by a specified number of bits.
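// For example, an ARM modified immediate with rotate field r and 8-bit
// constant c decodes to rotr32(c, 2 * r); getImplicitAddend() below uses this
// to recover the addends of the ALU group relocations.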
static uint32_t rotr32(uint32_t val, uint32_t amt) {
  assert(amt < 32 && "Invalid rotate amount");
  return (val >> amt) | (val << ((32 - amt) & 31));
}

static std::pair<uint32_t, uint32_t> getRemAndLZForGroup(unsigned group,
                                                         uint32_t val) {
  uint32_t rem, lz;
  do {
    lz = llvm::countLeadingZeros(val) & ~1;
    rem = val;
    if (lz == 32) // implies rem == 0
      break;
    val &= 0xffffff >> lz;
  } while (group--);
  return {rem, lz};
}

static void encodeAluGroup(uint8_t *loc, const Relocation &rel, uint64_t val,
                           int group, bool check) {
  // ADD/SUB (immediate) add = bit23, sub = bit22
  // The immediate field carries a 12-bit modified immediate, made up of a
  // 4-bit even rotate right and an 8-bit immediate.
  uint32_t opcode = 0x00800000;
  if (val >> 63) {
    opcode = 0x00400000;
    val = -val;
  }
  uint32_t imm, lz;
  std::tie(imm, lz) = getRemAndLZForGroup(group, val);
  uint32_t rot = 0;
  if (lz < 24) {
    imm = rotr32(imm, 24 - lz);
    rot = (lz + 8) << 7;
  }
  if (check && imm > 0xff)
    error(getErrorLocation(loc) + "unencodeable immediate " + Twine(val).str() +
          " for relocation " + toString(rel.type));
  write32le(loc, (read32le(loc) & 0xff3ff000) | opcode | rot | (imm & 0xff));
}

static void encodeLdrGroup(uint8_t *loc, const Relocation &rel, uint64_t val,
                           int group) {
  // R_ARM_LDR_PC_Gn is S + A - P, we have ((S + A) | T) - P, if S is a
  // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear
  // bottom bit to recover S + A - P.
  if (rel.sym->isFunc())
    val &= ~0x1;
  // LDR (literal) u = bit23
  uint32_t opcode = 0x00800000;
  if (val >> 63) {
    opcode = 0x0;
    val = -val;
  }
  uint32_t imm = getRemAndLZForGroup(group, val).first;
  checkUInt(loc, imm, 12, rel);
  write32le(loc, (read32le(loc) & 0xff7ff000) | opcode | imm);
}

static void encodeLdrsGroup(uint8_t *loc, const Relocation &rel, uint64_t val,
                            int group) {
  // R_ARM_LDRS_PC_Gn is S + A - P, we have ((S + A) | T) - P, if S is a
  // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear
  // bottom bit to recover S + A - P.
  if (rel.sym->isFunc())
    val &= ~0x1;
  // LDRD/LDRH/LDRSB/LDRSH (literal) u = bit23
  uint32_t opcode = 0x00800000;
  if (val >> 63) {
    opcode = 0x0;
    val = -val;
  }
  uint32_t imm = getRemAndLZForGroup(group, val).first;
  checkUInt(loc, imm, 8, rel);
  write32le(loc, (read32le(loc) & 0xff7ff0f0) | opcode | ((imm & 0xf0) << 4) |
                     (imm & 0xf));
}

void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  switch (rel.type) {
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
  case R_ARM_RELATIVE:
  case R_ARM_SBREL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_LE32:
  case R_ARM_TLS_TPOFF32:
  case R_ARM_TLS_DTPOFF32:
    write32le(loc, val);
    break;
  case R_ARM_PREL31:
    checkInt(loc, val, 31, rel);
    write32le(loc, (read32le(loc) & 0x80000000) | (val & ~0x80000000));
    break;
  case R_ARM_CALL: {
    // R_ARM_CALL is used for BL and BLX instructions, for symbols of type
    // STT_FUNC we choose whether to write a BL or BLX depending on the
    // value of bit 0 of Val, with bit 0 == 1 denoting Thumb. If the symbol is
    // not of type STT_FUNC then we must preserve the original instruction.
    // PLT entries are always ARM state so we know we don't need to interwork.
    assert(rel.sym); // R_ARM_CALL is always reached via relocate().
    bool bit0Thumb = val & 1;
    bool isBlx = (read32le(loc) & 0xfe000000) == 0xfa000000;
    // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
    // even when type not STT_FUNC.
    if (!rel.sym->isFunc() && isBlx != bit0Thumb)
      stateChangeWarning(loc, rel.type, *rel.sym);
    if (rel.sym->isFunc() ? bit0Thumb : isBlx) {
      // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
      checkInt(loc, val, 26, rel);
      write32le(loc, 0xfa000000 |                    // opcode
                         ((val & 2) << 23) |         // H
                         ((val >> 2) & 0x00ffffff)); // imm24
      break;
    }
    // BLX (always unconditional) instruction to an ARM Target, select an
    // unconditional BL.
    write32le(loc, 0xeb000000 | (read32le(loc) & 0x00ffffff));
    // fall through as BL encoding is shared with B
  }
    LLVM_FALLTHROUGH;
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    checkInt(loc, val, 26, rel);
    write32le(loc, (read32le(loc) & ~0x00ffffff) | ((val >> 2) & 0x00ffffff));
    break;
  case R_ARM_THM_JUMP8:
    // We do a 9 bit check because val is right-shifted by 1 bit.
    checkInt(loc, val, 9, rel);
    write16le(loc, (read32le(loc) & 0xff00) | ((val >> 1) & 0x00ff));
    break;
  case R_ARM_THM_JUMP11:
    // We do a 12 bit check because val is right-shifted by 1 bit.
    checkInt(loc, val, 12, rel);
    write16le(loc, (read32le(loc) & 0xf800) | ((val >> 1) & 0x07ff));
    break;
  case R_ARM_THM_JUMP19:
    // Encoding T3: Val = S:J2:J1:imm6:imm11:0
    checkInt(loc, val, 21, rel);
    write16le(loc,
              (read16le(loc) & 0xfbc0) |   // opcode cond
                  ((val >> 10) & 0x0400) | // S
                  ((val >> 12) & 0x003f)); // imm6
    write16le(loc + 2,
              0x8000 |                    // opcode
                  ((val >> 8) & 0x0800) | // J2
                  ((val >> 5) & 0x2000) | // J1
                  ((val >> 1) & 0x07ff)); // imm11
    break;
  case R_ARM_THM_CALL: {
    // R_ARM_THM_CALL is used for BL and BLX instructions, for symbols of type
    // STT_FUNC we choose whether to write a BL or BLX depending on the
    // value of bit 0 of Val, with bit 0 == 0 denoting ARM. If the symbol is
    // not of type STT_FUNC then we must preserve the original instruction.
    // PLT entries are always ARM state so we know we need to interwork.
    assert(rel.sym); // R_ARM_THM_CALL is always reached via relocate().
    bool bit0Thumb = val & 1;
    bool isBlx = (read16le(loc + 2) & 0x1000) == 0;
    // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
    // even when type not STT_FUNC. PLT entries generated by LLD are always
    // ARM.
    if (!rel.sym->isFunc() && !rel.sym->isInPlt() && isBlx == bit0Thumb)
      stateChangeWarning(loc, rel.type, *rel.sym);
    if (rel.sym->isFunc() || rel.sym->isInPlt() ? !bit0Thumb : isBlx) {
      // We are writing a BLX. Ensure the BLX destination is 4-byte aligned, as
      // the BLX instruction may only be 2-byte aligned. This must be done
      // before the overflow check.
      val = alignTo(val, 4);
      write16le(loc + 2, read16le(loc + 2) & ~0x1000);
    } else {
      write16le(loc + 2, (read16le(loc + 2) & ~0x1000) | 1 << 12);
    }
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
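      // The halfwords written below hold imm11 (high) : imm11 (low) : '0',
      // giving a +/- 4 MiB range, hence the 23-bit signed check.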
      checkInt(loc, val, 23, rel);
      write16le(loc,
                0xf000 |                     // opcode
                    ((val >> 12) & 0x07ff)); // imm11
      write16le(loc + 2,
                (read16le(loc + 2) & 0xd000) | // opcode
                    0x2800 |                   // J1 == J2 == 1
                    ((val >> 1) & 0x07ff));    // imm11
      break;
    }
  }
    // Fall through as rest of encoding is the same as B.W
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24:
    // Encoding B T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
    checkInt(loc, val, 25, rel);
    write16le(loc,
              0xf000 |                     // opcode
                  ((val >> 14) & 0x0400) | // S
                  ((val >> 12) & 0x03ff)); // imm10
    write16le(loc + 2,
              (read16le(loc + 2) & 0xd000) |                  // opcode
                  (((~(val >> 10)) ^ (val >> 11)) & 0x2000) | // J1
                  (((~(val >> 11)) ^ (val >> 13)) & 0x0800) | // J2
                  ((val >> 1) & 0x07ff));                     // imm11
    break;
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVW_BREL_NC:
    write32le(loc, (read32le(loc) & ~0x000f0fff) | ((val & 0xf000) << 4) |
                       (val & 0x0fff));
    break;
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVT_PREL:
  case R_ARM_MOVT_BREL:
    write32le(loc, (read32le(loc) & ~0x000f0fff) |
                       (((val >> 16) & 0xf000) << 4) | ((val >> 16) & 0xfff));
    break;
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_MOVT_BREL:
    // Encoding T1: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf2c0 |                     // opcode
                  ((val >> 17) & 0x0400) | // i
                  ((val >> 28) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val >> 12) & 0x7000) |   // imm3
                  ((val >> 16) & 0x00ff));   // imm8
    break;
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVW_BREL_NC:
    // Encoding T3: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf240 |                     // opcode
                  ((val >> 1) & 0x0400) |  // i
                  ((val >> 12) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val << 4) & 0x7000) |    // imm3
                  (val & 0x00ff));           // imm8
    break;
  case R_ARM_ALU_PC_G0:
    encodeAluGroup(loc, rel, val, 0, true);
    break;
  case R_ARM_ALU_PC_G0_NC:
    encodeAluGroup(loc, rel, val, 0, false);
    break;
  case R_ARM_ALU_PC_G1:
    encodeAluGroup(loc, rel, val, 1, true);
    break;
  case R_ARM_ALU_PC_G1_NC:
    encodeAluGroup(loc, rel, val, 1, false);
    break;
  case R_ARM_ALU_PC_G2:
    encodeAluGroup(loc, rel, val, 2, true);
    break;
  case R_ARM_LDR_PC_G0:
    encodeLdrGroup(loc, rel, val, 0);
    break;
  case R_ARM_LDR_PC_G1:
    encodeLdrGroup(loc, rel, val, 1);
    break;
  case R_ARM_LDR_PC_G2:
    encodeLdrGroup(loc, rel, val, 2);
    break;
  case R_ARM_LDRS_PC_G0:
    encodeLdrsGroup(loc, rel, val, 0);
    break;
  case R_ARM_LDRS_PC_G1:
    encodeLdrsGroup(loc, rel, val, 1);
    break;
  case R_ARM_LDRS_PC_G2:
    encodeLdrsGroup(loc, rel, val, 2);
    break;
  case R_ARM_THM_ALU_PREL_11_0: {
    // ADR encoding T2 (sub), T3 (add) i:imm3:imm8
    int64_t imm = val;
    uint16_t sub = 0;
    if (imm < 0) {
      imm = -imm;
      sub = 0x00a0;
    }
    checkUInt(loc, imm, 12, rel);
    write16le(loc, (read16le(loc) & 0xfb0f) | sub | (imm & 0x800) >> 1);
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | (imm & 0x700) << 4 | (imm & 0xff));
    break;
  }
  case R_ARM_THM_PC8:
    // ADR and LDR literal encoding T1 positive offset only imm8:00
    // R_ARM_THM_PC8 is S + A - Pa, we have ((S + A) | T) - Pa, if S is a
    // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear
    // bottom bit to recover S + A - Pa.
    if (rel.sym->isFunc())
      val &= ~0x1;
    checkUInt(loc, val, 10, rel);
    checkAlignment(loc, val, 4, rel);
    write16le(loc, (read16le(loc) & 0xff00) | (val & 0x3fc) >> 2);
    break;
  case R_ARM_THM_PC12: {
    // LDR (literal) encoding T2, add = (U == '1') imm12
    // imm12 is unsigned
    // R_ARM_THM_PC12 is S + A - Pa, we have ((S + A) | T) - Pa, if S is a
    // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear
    // bottom bit to recover S + A - Pa.
    if (rel.sym->isFunc())
      val &= ~0x1;
    int64_t imm12 = val;
    uint16_t u = 0x0080;
    if (imm12 < 0) {
      imm12 = -imm12;
      u = 0;
    }
    checkUInt(loc, imm12, 12, rel);
    write16le(loc, read16le(loc) | u);
    write16le(loc + 2, (read16le(loc + 2) & 0xf000) | imm12);
    break;
  }
  default:
    llvm_unreachable("unknown relocation");
  }
}

int64_t ARM::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  default:
    internalLinkerError(getErrorLocation(buf),
                        "cannot read addend for relocation " + toString(type));
    return 0;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GLOB_DAT:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_IRELATIVE:
  case R_ARM_REL32:
  case R_ARM_RELATIVE:
  case R_ARM_SBREL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_DTPMOD32:
  case R_ARM_TLS_DTPOFF32:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LE32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_TPOFF32:
    return SignExtend64<32>(read32le(buf));
  case R_ARM_PREL31:
    return SignExtend64<31>(read32le(buf));
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    return SignExtend64<26>(read32le(buf) << 2);
  case R_ARM_THM_JUMP8:
    return SignExtend64<9>(read16le(buf) << 1);
  case R_ARM_THM_JUMP11:
    return SignExtend64<12>(read16le(buf) << 1);
  case R_ARM_THM_JUMP19: {
    // Encoding T3: A = S:J2:J1:imm6:imm11:0
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<20>(((hi & 0x0400) << 10) | // S
                            ((lo & 0x0800) << 8) |  // J2
                            ((lo & 0x2000) << 5) |  // J1
                            ((hi & 0x003f) << 12) | // imm6
                            ((lo & 0x07ff) << 1));  // imm11:0
  }
  case R_ARM_THM_CALL:
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
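      // The addend is reconstructed from the imm11 fields of the two halfwords
      // (imm11:imm11:'0'); the masks below exclude the fixed opcode bits.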
      uint16_t hi = read16le(buf);
      uint16_t lo = read16le(buf + 2);
      return SignExtend64<22>(((hi & 0x7ff) << 12) | // imm11
                              ((lo & 0x7ff) << 1));  // imm11:0
      break;
    }
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24: {
    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<24>(((hi & 0x0400) << 14) |                    // S
                            (~((lo ^ (hi << 3)) << 10) & 0x00800000) | // I1
                            (~((lo ^ (hi << 1)) << 11) & 0x00400000) | // I2
                            ((hi & 0x003ff) << 12) |                   // imm10
                            ((lo & 0x007ff) << 1));                    // imm11:0
  }
  // ELF for the ARM Architecture, 4.6.1.1: the implicit addend for MOVW and
  // MOVT is in the range -32768 <= A < 32768
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_MOVW_BREL_NC:
  case R_ARM_MOVT_BREL: {
    uint64_t val = read32le(buf) & 0x000f0fff;
    return SignExtend64<16>(((val & 0x000f0000) >> 4) | (val & 0x00fff));
  }
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_MOVW_BREL_NC:
  case R_ARM_THM_MOVT_BREL: {
    // Encoding T3: A = imm4:i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<16>(((hi & 0x000f) << 12) | // imm4
                            ((hi & 0x0400) << 1) |  // i
                            ((lo & 0x7000) >> 4) |  // imm3
                            (lo & 0x00ff));         // imm8
  }
  case R_ARM_ALU_PC_G0:
  case R_ARM_ALU_PC_G0_NC:
  case R_ARM_ALU_PC_G1:
  case R_ARM_ALU_PC_G1_NC:
  case R_ARM_ALU_PC_G2: {
    // The 12-bit immediate is a modified immediate made up of a 4-bit even
    // right rotation and an 8-bit constant. After the rotation the value
    // is zero-extended. When bit 23 is set the instruction is an add, when
    // bit 22 is set it is a sub.
    uint32_t instr = read32le(buf);
    uint32_t val = rotr32(instr & 0xff, ((instr & 0xf00) >> 8) * 2);
    return (instr & 0x00400000) ? -val : val;
  }
  case R_ARM_LDR_PC_G0:
  case R_ARM_LDR_PC_G1:
  case R_ARM_LDR_PC_G2: {
    // ADR (literal) add = bit23, sub = bit22
    // LDR (literal) u = bit23 unsigned imm12
    bool u = read32le(buf) & 0x00800000;
    uint32_t imm12 = read32le(buf) & 0xfff;
    return u ? imm12 : -imm12;
  }
  case R_ARM_LDRS_PC_G0:
  case R_ARM_LDRS_PC_G1:
  case R_ARM_LDRS_PC_G2: {
    // LDRD/LDRH/LDRSB/LDRSH (literal) u = bit23 unsigned imm8
    uint32_t opcode = read32le(buf);
    bool u = opcode & 0x00800000;
    uint32_t imm4l = opcode & 0xf;
    uint32_t imm4h = (opcode & 0xf00) >> 4;
    return u ? (imm4h | imm4l) : -(imm4h | imm4l);
  }
  case R_ARM_THM_ALU_PREL_11_0: {
    // Thumb2 ADR, which is an alias for a sub or add instruction with an
    // unsigned immediate.
    // ADR encoding T2 (sub), T3 (add) i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    uint64_t imm = (hi & 0x0400) << 1 | // i
                   (lo & 0x7000) >> 4 | // imm3
                   (lo & 0x00ff);       // imm8
    // For sub, addend is negative, add is positive.
    return (hi & 0x00f0) ? -imm : imm;
  }
  case R_ARM_THM_PC8:
    // ADR and LDR (literal) encoding T1
    // From ELF for the ARM Architecture the initial signed addend is formed
    // from an unsigned field using expression (((imm8:00 + 4) & 0x3ff) - 4)
    // this trick permits the PC bias of -4 to be encoded using imm8 = 0xff
    return ((((read16le(buf) & 0xff) << 2) + 4) & 0x3ff) - 4;
  case R_ARM_THM_PC12: {
    // LDR (literal) encoding T2, add = (U == '1') imm12
    bool u = read16le(buf) & 0x0080;
    uint64_t imm12 = read16le(buf + 2) & 0x0fff;
    return u ? imm12 : -imm12;
  }
  case R_ARM_NONE:
  case R_ARM_V4BX:
  case R_ARM_JUMP_SLOT:
    // These relocations are defined as not having an implicit addend.
    return 0;
  }
}

TargetInfo *elf::getARMTargetInfo() {
  static ARM target;
  return &target;
}