//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;

namespace lld {
namespace elf {

namespace {
class ARM final : public TargetInfo {
public:
  ARM();
  uint32_t calcEFlags() const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void addPltSymbols(InputSection &isec, uint64_t off) const override;
  void addPltHeaderSymbols(InputSection &isd) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // namespace

ARM::ARM() {
  copyRel = R_ARM_COPY;
  relativeRel = R_ARM_RELATIVE;
  iRelativeRel = R_ARM_IRELATIVE;
  gotRel = R_ARM_GLOB_DAT;
  noneRel = R_ARM_NONE;
  pltRel = R_ARM_JUMP_SLOT;
  symbolicRel = R_ARM_ABS32;
  tlsGotRel = R_ARM_TLS_TPOFF32;
  tlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
  tlsOffsetRel = R_ARM_TLS_DTPOFF32;
  gotBaseSymInGotPlt = false;
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
  trapInstr = {0xd4, 0xd4, 0xd4, 0xd4};
  needsThunks = true;
}

uint32_t ARM::calcEFlags() const {
  // The ABIFloatType is used by loaders to detect the floating point calling
  // convention.
  uint32_t abiFloatType = 0;
  if (config->armVFPArgs == ARMVFPArgKind::Base ||
      config->armVFPArgs == ARMVFPArgKind::Default)
    abiFloatType = EF_ARM_ABI_FLOAT_SOFT;
  else if (config->armVFPArgs == ARMVFPArgKind::VFP)
    abiFloatType = EF_ARM_ABI_FLOAT_HARD;

  // We don't currently use any features incompatible with EF_ARM_EABI_VER5,
  // but we don't have any firm guarantees of conformance. Linux AArch64
  // kernels (as of 2016) require an EABI version to be set.
  return EF_ARM_EABI_VER5 | abiFloatType;
}
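// For illustration, assuming the standard flag values from
// llvm/BinaryFormat/ELF.h: a link with VFP (hard-float) argument passing ends
// up with e_flags = EF_ARM_EABI_VER5 (0x05000000) | EF_ARM_ABI_FLOAT_HARD
// (0x00000400) = 0x05000400, which readelf typically describes as
// "Version5 EABI, hard-float ABI".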
RelExpr ARM::getRelExpr(RelType type, const Symbol &s,
                        const uint8_t *loc) const {
  switch (type) {
  case R_ARM_THM_JUMP11:
    return R_PC;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return R_PLT_PC;
  case R_ARM_GOTOFF32:
    // (S + A) - GOT_ORG
    return R_GOTREL;
  case R_ARM_GOT_BREL:
    // GOT(S) + A - GOT_ORG
    return R_GOT_OFF;
  case R_ARM_GOT_PREL:
  case R_ARM_TLS_IE32:
    // GOT(S) + A - P
    return R_GOT_PC;
  case R_ARM_SBREL32:
    return R_ARM_SBREL;
  case R_ARM_TARGET1:
    return config->target1Rel ? R_PC : R_ABS;
  case R_ARM_TARGET2:
    if (config->target2 == Target2Policy::Rel)
      return R_PC;
    if (config->target2 == Target2Policy::Abs)
      return R_ABS;
    return R_GOT_PC;
  case R_ARM_TLS_GD32:
    return R_TLSGD_PC;
  case R_ARM_TLS_LDM32:
    return R_TLSLD_PC;
  case R_ARM_BASE_PREL:
    // B(S) + A - P
    // FIXME: currently B(S) is assumed to be .got, this may not hold for all
    // platforms.
    return R_GOTONLY_PC;
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
    return R_PC;
  case R_ARM_NONE:
    return R_NONE;
  case R_ARM_TLS_LE32:
    return R_TLS;
  case R_ARM_V4BX:
    // V4BX is just a marker to indicate there's a "bx rN" instruction at the
    // given address. It can be used to implement a special linker mode which
    // rewrites ARMv4T inputs to ARMv4. Since we support only ARMv4T input and
    // not ARMv4 output, we can just ignore it.
    return R_NONE;
  default:
    return R_ABS;
  }
}

RelType ARM::getDynRel(RelType type) const {
  if ((type == R_ARM_ABS32) || (type == R_ARM_TARGET1 && !config->target1Rel))
    return R_ARM_ABS32;
  return R_ARM_NONE;
}

void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const {
  write32le(buf, in.plt->getVA());
}

void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  // An ARM entry is the address of the ifunc resolver function.
  write32le(buf, s.getVA());
}

// Long form PLT Header that does not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltHeaderLong(uint8_t *buf) {
  const uint8_t pltData[] = {
      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
      0x00, 0x00, 0x00, 0x00, // L2: .word &(.got.plt) - L1 - 8
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4};
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t gotPlt = in.gotPlt->getVA();
  uint64_t l1 = in.plt->getVA() + 8;
  write32le(buf + 16, gotPlt - l1 - 8);
}
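// A hypothetical worked example for the short-form header written by
// writePltHeader() below (the offset value is made up for illustration):
// if .got.plt - L1 - 4 were 0x0123456, the three instructions would carry it
// in 8 + 8 + 12 bit pieces:
//   add lr, pc, #0x100000   ; bits 27:20 of the offset
//   add lr, lr, #0x23000    ; bits 19:12
//   ldr pc, [lr, #0x456]    ; bits 11:0
// The two adds use fixed immediate rotations and can each hold only 8 bits,
// so writePltHeader() falls back to writePltHeaderLong() above when the
// offset does not satisfy the isUInt<27> check.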
// The default PLT header requires the .got.plt to be within 128 MiB of the
// .plt in the positive direction.
void ARM::writePltHeader(uint8_t *buf) const {
  // Use a similar sequence to that in writePlt(); the difference is that the
  // calling convention means we use lr instead of ip. The PLT entry is
  // responsible for saving lr on the stack, the dynamic loader is responsible
  // for reloading it.
  const uint32_t pltData[] = {
      0xe52de004, // L1: str lr, [sp,#-4]!
      0xe28fe600, //     add lr, pc,  #0x0NN00000 &(.got.plt - L1 - 4)
      0xe28eea00, //     add lr, lr,  #0x000NN000 &(.got.plt - L1 - 4)
      0xe5bef000, //     ldr pc, [lr, #0x00000NNN] &(.got.plt - L1 - 4)
  };

  uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 4;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the Offset, use the long form.
    writePltHeaderLong(buf);
    return;
  }
  write32le(buf + 0, pltData[0]);
  write32le(buf + 4, pltData[1] | ((offset >> 20) & 0xff));
  write32le(buf + 8, pltData[2] | ((offset >> 12) & 0xff));
  write32le(buf + 12, pltData[3] | (offset & 0xfff));
  memcpy(buf + 16, trapInstr.data(), 4); // Pad to 32-byte boundary
  memcpy(buf + 20, trapInstr.data(), 4);
  memcpy(buf + 24, trapInstr.data(), 4);
  memcpy(buf + 28, trapInstr.data(), 4);
}

void ARM::addPltHeaderSymbols(InputSection &isec) const {
  addSyntheticLocal("$a", STT_NOTYPE, 0, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, 16, 0, isec);
}

// Long form PLT entries that do not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltLong(uint8_t *buf, uint64_t gotPltEntryAddr,
                         uint64_t pltEntryAddr) {
  const uint8_t pltData[] = {
      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
      0x00, 0x00, 0x00, 0x00, // L2: .word Offset(&(.got.plt) - L1 - 8)
  };
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t l1 = pltEntryAddr + 4;
  write32le(buf + 12, gotPltEntryAddr - l1 - 8);
}

// The default PLT entries require the .got.plt to be within 128 MiB of the
// .plt in the positive direction.
void ARM::writePlt(uint8_t *buf, const Symbol &sym,
                   uint64_t pltEntryAddr) const {
  // The PLT entry is similar to the example given in Appendix A of ELF for
  // the Arm Architecture. Instead of using the Group Relocations to find the
  // optimal rotation for the 8-bit immediate used in the add instructions we
  // hard code the most compact rotations for simplicity. This saves a load
  // instruction over the long plt sequences.
  const uint32_t pltData[] = {
      0xe28fc600, // L1: add ip, pc,  #0x0NN00000 Offset(&(.got.plt) - L1 - 8)
      0xe28cca00, //     add ip, ip,  #0x000NN000 Offset(&(.got.plt) - L1 - 8)
      0xe5bcf000, //     ldr pc, [ip, #0x00000NNN] Offset(&(.got.plt) - L1 - 8)
  };

  uint64_t offset = sym.getGotPltVA() - pltEntryAddr - 8;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the Offset, use the long form.
    writePltLong(buf, sym.getGotPltVA(), pltEntryAddr);
    return;
  }
  write32le(buf + 0, pltData[0] | ((offset >> 20) & 0xff));
  write32le(buf + 4, pltData[1] | ((offset >> 12) & 0xff));
  write32le(buf + 8, pltData[2] | (offset & 0xfff));
  memcpy(buf + 12, trapInstr.data(), 4); // Pad to 16-byte boundary
}

void ARM::addPltSymbols(InputSection &isec, uint64_t off) const {
  addSyntheticLocal("$a", STT_NOTYPE, off, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, off + 12, 0, isec);
}
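// A note on the synthetic "$a" and "$d" locals above: these are the mapping
// symbols defined by the ELF for the Arm Architecture ABI. "$a" marks the
// start of a run of ARM instructions and "$d" the start of literal data, so
// disassemblers and other tools can tell the PLT code apart from the padding
// and literal words that follow it.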
bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                     uint64_t branchAddr, const Symbol &s,
                     int64_t /*a*/) const {
  // If S is an undefined weak symbol and does not have a PLT entry then it
  // will be resolved as a branch to the next instruction.
  if (s.isUndefWeak() && !s.isInPlt())
    return false;
  // A state change from ARM to Thumb and vice versa must go through an
  // interworking thunk if the relocation type is not R_ARM_CALL or
  // R_ARM_THM_CALL.
  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
    // Source is ARM, all PLT entries are ARM so no interworking required.
    // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 set (Thumb).
    if (s.isFunc() && expr == R_PC && (s.getVA() & 1))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst);
  }
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    // Source is Thumb, all PLT entries are ARM so interworking is required.
    // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 clear (ARM).
    if (expr == R_PLT_PC || (s.isFunc() && (s.getVA() & 1) == 0))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_THM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst);
  }
  }
  return false;
}

uint32_t ARM::getThunkSectionSpacing() const {
  // The placing of pre-created ThunkSections is controlled by the value
  // thunkSectionSpacing returned by getThunkSectionSpacing(). The aim is to
  // place the ThunkSection such that all branches from the InputSections
  // prior to the ThunkSection can reach a Thunk placed at the end of the
  // ThunkSection. Graphically:
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |

  // Pre-created ThunkSections are spaced roughly 16MiB apart on ARMv7. This
  // is to match the most common expected case of a Thumb 2 encoded BL, BLX or
  // B.W:
  // ARM B, BL, BLX range +/- 32MiB
  // Thumb B.W, BL, BLX range +/- 16MiB
  // Thumb B<cc>.W range +/- 1MiB
  // If a branch cannot reach a pre-created ThunkSection a new one will be
  // created, so we can handle the rare cases of a Thumb 2 conditional branch.
  // We intentionally use a lower size for thunkSectionSpacing than the maximum
  // branch range so the end of the ThunkSection is more likely to be within
  // range of the branch instruction that is furthest away. The value we
  // shorten thunkSectionSpacing by is set conservatively to allow us to create
  // 16,384 12 byte Thunks at any offset in a ThunkSection without risk of a
  // branch to one of the Thunks going out of range.

  // On Arm the thunkSectionSpacing depends on the range of the Thumb branch
  // instructions. On earlier architectures such as ARMv4, ARMv5 and ARMv6
  // (except ARMv6T2) the range is +/- 4MiB.

  return (config->armJ1J2BranchEncoding) ? 0x1000000 - 0x30000
                                         : 0x400000 - 0x7500;
}
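// Checking the reserve arithmetic above: 16,384 thunks * 12 bytes = 196,608
// bytes = 0x30000, which is exactly the amount subtracted from the 16 MiB
// (0x1000000) Thumb-2 branch range. The 0x7500 subtracted in the pre-ARMv6T2
// case is the analogous, smaller reserve taken off the +/- 4 MiB (0x400000)
// range.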
bool ARM::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
  uint64_t range;
  uint64_t instrSize;

  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
  case R_ARM_CALL:
    range = 0x2000000;
    instrSize = 4;
    break;
  case R_ARM_THM_JUMP19:
    range = 0x100000;
    instrSize = 2;
    break;
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    range = config->armJ1J2BranchEncoding ? 0x1000000 : 0x400000;
    instrSize = 2;
    break;
  default:
    return true;
  }
  // PC at Src is 2 instructions ahead, immediate of branch is signed.
  if (src > dst)
    range -= 2 * instrSize;
  else
    range += instrSize;

  if ((dst & 0x1) == 0)
    // Destination is ARM, if ARM caller then Src is already 4-byte aligned.
    // If Thumb Caller (BLX) the Src address has bottom 2 bits cleared to
    // ensure destination will be 4 byte aligned.
    src &= ~0x3;
  else
    // Bit 0 == 1 denotes Thumb state, it is not part of the range.
    dst &= ~0x1;

  uint64_t distance = (src > dst) ? src - dst : dst - src;
  return distance <= range;
}
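// The ranges used in inBranchRange() follow from the branch encodings handled
// by relocateOne() below: R_ARM_CALL and R_ARM_JUMP24 carry a signed 24-bit
// immediate shifted left by 2, giving +/- 0x2000000 (32 MiB); a Thumb-2 BL or
// B.W with the J1/J2 encoding carries S:I1:I2:imm10:imm11:0, giving
// +/- 0x1000000 (16 MiB); and the conditional B<cc>.W (R_ARM_THM_JUMP19)
// carries a 21-bit signed offset, giving +/- 0x100000 (1 MiB).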
void ARM::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
  switch (type) {
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
  case R_ARM_RELATIVE:
  case R_ARM_SBREL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_LE32:
  case R_ARM_TLS_TPOFF32:
  case R_ARM_TLS_DTPOFF32:
    write32le(loc, val);
    break;
  case R_ARM_PREL31:
    checkInt(loc, val, 31, type);
    write32le(loc, (read32le(loc) & 0x80000000) | (val & ~0x80000000));
    break;
  case R_ARM_CALL:
    // R_ARM_CALL is used for BL and BLX instructions; depending on the
    // value of bit 0 of Val, we must select a BL or BLX instruction.
    if (val & 1) {
      // If bit 0 of Val is 1 the target is Thumb, we must select a BLX.
      // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
      checkInt(loc, val, 26, type);
      write32le(loc, 0xfa000000 |                    // opcode
                         ((val & 2) << 23) |         // H
                         ((val >> 2) & 0x00ffffff)); // imm24
      break;
    }
    if ((read32le(loc) & 0xfe000000) == 0xfa000000)
      // BLX (always unconditional) instruction to an ARM Target, select an
      // unconditional BL.
      write32le(loc, 0xeb000000 | (read32le(loc) & 0x00ffffff));
    // Fall through as BL encoding is shared with B.
    LLVM_FALLTHROUGH;
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    checkInt(loc, val, 26, type);
    write32le(loc, (read32le(loc) & ~0x00ffffff) | ((val >> 2) & 0x00ffffff));
    break;
  case R_ARM_THM_JUMP11:
    checkInt(loc, val, 12, type);
    write16le(loc, (read32le(loc) & 0xf800) | ((val >> 1) & 0x07ff));
    break;
  case R_ARM_THM_JUMP19:
    // Encoding T3: Val = S:J2:J1:imm6:imm11:0
    checkInt(loc, val, 21, type);
    write16le(loc,
              (read16le(loc) & 0xfbc0) |   // opcode cond
                  ((val >> 10) & 0x0400) | // S
                  ((val >> 12) & 0x003f)); // imm6
    write16le(loc + 2,
              0x8000 |                    // opcode
                  ((val >> 8) & 0x0800) | // J2
                  ((val >> 5) & 0x2000) | // J1
                  ((val >> 1) & 0x07ff)); // imm11
    break;
  case R_ARM_THM_CALL:
    // R_ARM_THM_CALL is used for BL and BLX instructions; depending on the
    // value of bit 0 of Val, we must select a BL or BLX instruction.
    if ((val & 1) == 0) {
      // Ensure the BLX destination is 4-byte aligned, as the BLX instruction
      // may only be 2-byte aligned. This must be done before the overflow
      // check.
      val = alignTo(val, 4);
    }
    // Bit 12 is 0 for BLX, 1 for BL.
    write16le(loc + 2, (read16le(loc + 2) & ~0x1000) | (val & 1) << 12);
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
      checkInt(loc, val, 23, type);
      write16le(loc,
                0xf000 |                     // opcode
                    ((val >> 12) & 0x07ff)); // imm11
      write16le(loc + 2,
                (read16le(loc + 2) & 0xd000) | // opcode
                    0x2800 |                   // J1 == J2 == 1
                    ((val >> 1) & 0x07ff));    // imm11
      break;
    }
    // Fall through as the rest of the encoding is the same as B.W.
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24:
    // Encoding B T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
    checkInt(loc, val, 25, type);
    write16le(loc,
              0xf000 |                     // opcode
                  ((val >> 14) & 0x0400) | // S
                  ((val >> 12) & 0x03ff)); // imm10
    write16le(loc + 2,
              (read16le(loc + 2) & 0xd000) |                  // opcode
                  (((~(val >> 10)) ^ (val >> 11)) & 0x2000) | // J1
                  (((~(val >> 11)) ^ (val >> 13)) & 0x0800) | // J2
                  ((val >> 1) & 0x07ff));                     // imm11
    break;
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVW_PREL_NC:
    write32le(loc, (read32le(loc) & ~0x000f0fff) | ((val & 0xf000) << 4) |
                       (val & 0x0fff));
    break;
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVT_PREL:
    write32le(loc, (read32le(loc) & ~0x000f0fff) |
                       (((val >> 16) & 0xf000) << 4) | ((val >> 16) & 0xfff));
    break;
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVT_PREL:
    // Encoding T1: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf2c0 |                     // opcode
                  ((val >> 17) & 0x0400) | // i
                  ((val >> 28) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val >> 12) & 0x7000) |   // imm3
                  ((val >> 16) & 0x00ff));   // imm8
    break;
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVW_PREL_NC:
    // Encoding T3: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf240 |                     // opcode
                  ((val >> 1) & 0x0400) |  // i
                  ((val >> 12) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val << 4) & 0x7000) |    // imm3
                  (val & 0x00ff));           // imm8
    break;
  default:
    error(getErrorLocation(loc) + "unrecognized relocation " + toString(type));
  }
}
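// ARM uses REL-format relocations, so the addend is not stored in the
// relocation record; it is encoded in the instruction or data word being
// relocated. getImplicitAddend() is therefore the inverse of the encodings
// performed by relocateOne() above: it extracts the addend bits from the
// section contents and sign-extends them.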
int64_t ARM::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  default:
    return 0;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LE32:
    return SignExtend64<32>(read32le(buf));
  case R_ARM_PREL31:
    return SignExtend64<31>(read32le(buf));
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    return SignExtend64<26>(read32le(buf) << 2);
  case R_ARM_THM_JUMP11:
    return SignExtend64<12>(read16le(buf) << 1);
  case R_ARM_THM_JUMP19: {
    // Encoding T3: A = S:J2:J1:imm6:imm11:0
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<20>(((hi & 0x0400) << 10) | // S
                            ((lo & 0x0800) << 8) |  // J2
                            ((lo & 0x2000) << 5) |  // J1
                            ((hi & 0x003f) << 12) | // imm6
                            ((lo & 0x07ff) << 1));  // imm11:0
  }
  case R_ARM_THM_CALL:
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
      uint16_t hi = read16le(buf);
      uint16_t lo = read16le(buf + 2);
      return SignExtend64<22>(((hi & 0x7ff) << 12) | // imm11
                              ((lo & 0x7ff) << 1));  // imm11:0
    }
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24: {
    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<24>(((hi & 0x0400) << 14) |                    // S
                            (~((lo ^ (hi << 3)) << 10) & 0x00800000) | // I1
                            (~((lo ^ (hi << 1)) << 11) & 0x00400000) | // I2
                            ((hi & 0x003ff) << 12) |                   // imm10
                            ((lo & 0x007ff) << 1));                    // imm11:0
  }
  // Per ELF for the ARM Architecture 4.6.1.1, the implicit addend for MOVW
  // and MOVT is in the range -32768 <= A < 32768.
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL: {
    uint64_t val = read32le(buf) & 0x000f0fff;
    return SignExtend64<16>(((val & 0x000f0000) >> 4) | (val & 0x00fff));
  }
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL: {
    // Encoding T3: A = imm4:i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<16>(((hi & 0x000f) << 12) | // imm4
                            ((hi & 0x0400) << 1) |  // i
                            ((lo & 0x7000) >> 4) |  // imm3
                            (lo & 0x00ff));         // imm8
  }
  }
}

TargetInfo *getARMTargetInfo() {
  static ARM target;
  return &target;
}

} // namespace elf
} // namespace lld