//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

namespace {
class ARM final : public TargetInfo {
public:
  ARM();
  uint32_t calcEFlags() const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void addPltSymbols(InputSection &isec, uint64_t off) const override;
  void addPltHeaderSymbols(InputSection &isd) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
};
} // namespace

ARM::ARM() {
  copyRel = R_ARM_COPY;
  relativeRel = R_ARM_RELATIVE;
  iRelativeRel = R_ARM_IRELATIVE;
  gotRel = R_ARM_GLOB_DAT;
  pltRel = R_ARM_JUMP_SLOT;
  symbolicRel = R_ARM_ABS32;
  tlsGotRel = R_ARM_TLS_TPOFF32;
  tlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
  tlsOffsetRel = R_ARM_TLS_DTPOFF32;
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
  trapInstr = {0xd4, 0xd4, 0xd4, 0xd4};
  needsThunks = true;
  defaultMaxPageSize = 65536;
}

uint32_t ARM::calcEFlags() const {
  // The ABIFloatType is used by loaders to detect the floating point calling
  // convention.
  uint32_t abiFloatType = 0;
  if (config->armVFPArgs == ARMVFPArgKind::Base ||
      config->armVFPArgs == ARMVFPArgKind::Default)
    abiFloatType = EF_ARM_ABI_FLOAT_SOFT;
  else if (config->armVFPArgs == ARMVFPArgKind::VFP)
    abiFloatType = EF_ARM_ABI_FLOAT_HARD;

  // We don't currently use any features incompatible with EF_ARM_EABI_VER5,
  // but we don't have any firm guarantees of conformance. Linux AArch64
  // kernels (as of 2016) require an EABI version to be set.
  return EF_ARM_EABI_VER5 | abiFloatType;
}

RelExpr ARM::getRelExpr(RelType type, const Symbol &s,
                        const uint8_t *loc) const {
  switch (type) {
  case R_ARM_ABS32:
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
    return R_ABS;
  case R_ARM_THM_JUMP8:
  case R_ARM_THM_JUMP11:
    return R_PC;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return R_PLT_PC;
  case R_ARM_GOTOFF32:
    // (S + A) - GOT_ORG
    return R_GOTREL;
  case R_ARM_GOT_BREL:
    // GOT(S) + A - GOT_ORG
    return R_GOT_OFF;
  case R_ARM_GOT_PREL:
  case R_ARM_TLS_IE32:
    // GOT(S) + A - P
    return R_GOT_PC;
  case R_ARM_SBREL32:
    return R_ARM_SBREL;
  case R_ARM_TARGET1:
    return config->target1Rel ? R_PC : R_ABS;
  case R_ARM_TARGET2:
    if (config->target2 == Target2Policy::Rel)
      return R_PC;
    if (config->target2 == Target2Policy::Abs)
      return R_ABS;
    return R_GOT_PC;
  case R_ARM_TLS_GD32:
    return R_TLSGD_PC;
  case R_ARM_TLS_LDM32:
    return R_TLSLD_PC;
  case R_ARM_TLS_LDO32:
    return R_DTPREL;
  case R_ARM_BASE_PREL:
    // B(S) + A - P
    // FIXME: currently B(S) assumed to be .got, this may not hold for all
    // platforms.
    return R_GOTONLY_PC;
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
    return R_PC;
  case R_ARM_ALU_PC_G0:
  case R_ARM_ALU_PC_G0_NC:
  case R_ARM_ALU_PC_G1:
  case R_ARM_ALU_PC_G1_NC:
  case R_ARM_ALU_PC_G2:
  case R_ARM_LDR_PC_G0:
  case R_ARM_LDR_PC_G1:
  case R_ARM_LDR_PC_G2:
  case R_ARM_LDRS_PC_G0:
  case R_ARM_LDRS_PC_G1:
  case R_ARM_LDRS_PC_G2:
  case R_ARM_THM_ALU_PREL_11_0:
  case R_ARM_THM_PC8:
  case R_ARM_THM_PC12:
    return R_ARM_PCA;
  case R_ARM_MOVW_BREL_NC:
  case R_ARM_MOVW_BREL:
  case R_ARM_MOVT_BREL:
  case R_ARM_THM_MOVW_BREL_NC:
  case R_ARM_THM_MOVW_BREL:
  case R_ARM_THM_MOVT_BREL:
    return R_ARM_SBREL;
  case R_ARM_NONE:
    return R_NONE;
  case R_ARM_TLS_LE32:
    return R_TPREL;
  case R_ARM_V4BX:
    // V4BX is just a marker to indicate there's a "bx rN" instruction at the
    // given address. It can be used to implement a special linker mode which
    // rewrites ARMv4T inputs to ARMv4. Since we support only ARMv4T input and
    // not ARMv4 output, we can just ignore it.
    return R_NONE;
  default:
    error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
          ") against symbol " + toString(s));
    return R_NONE;
  }
}

RelType ARM::getDynRel(RelType type) const {
  if ((type == R_ARM_ABS32) || (type == R_ARM_TARGET1 && !config->target1Rel))
    return R_ARM_ABS32;
  return R_ARM_NONE;
}

void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const {
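  // Each .got.plt entry initially holds the address of the PLT header below,
  // so the first call runs the lazy-binding sequence; the dynamic loader then
  // replaces the entry with the resolved target address.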
  write32le(buf, in.plt->getVA());
}

void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  // An ARM entry is the address of the ifunc resolver function.
  write32le(buf, s.getVA());
}

// Long form PLT Header that does not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltHeaderLong(uint8_t *buf) {
  const uint8_t pltData[] = {
      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4};
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t gotPlt = in.gotPlt->getVA();
  uint64_t l1 = in.plt->getVA() + 8;
  write32le(buf + 16, gotPlt - l1 - 8);
}

// The default PLT header requires the .got.plt to be within 128 MiB of the
// .plt in the positive direction.
void ARM::writePltHeader(uint8_t *buf) const {
  // Use a similar sequence to that in writePlt(); the difference is that the
  // calling convention means we use lr instead of ip. The PLT entry is
  // responsible for saving lr on the stack, the dynamic loader is responsible
  // for reloading it.
  const uint32_t pltData[] = {
      0xe52de004, // L1: str lr, [sp,#-4]!
      0xe28fe600, //     add lr, pc,  #0x0NN00000 &(.got.plt) - L1 - 4
      0xe28eea00, //     add lr, lr,  #0x000NN000 &(.got.plt) - L1 - 4
      0xe5bef000, //     ldr pc, [lr, #0x00000NNN] &(.got.plt) - L1 - 4
  };

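  // How the short form reaches .got.plt (a sketch based on the encodings
  // below): the two add instructions carry 8-bit immediates at fixed rotations
  // covering bits [27:20] and [19:12] of `offset`, and the final ldr supplies
  // bits [11:0]. Together with the ARM-state PC bias of 8 this makes the load
  // address &(.got.plt) + 8, the word the dynamic loader fills in for its
  // lazy-binding resolver.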
  uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 4;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the offset, so use the long form.
    writePltHeaderLong(buf);
    return;
  }
  write32le(buf + 0, pltData[0]);
  write32le(buf + 4, pltData[1] | ((offset >> 20) & 0xff));
  write32le(buf + 8, pltData[2] | ((offset >> 12) & 0xff));
  write32le(buf + 12, pltData[3] | (offset & 0xfff));
  memcpy(buf + 16, trapInstr.data(), 4); // Pad to 32-byte boundary
  memcpy(buf + 20, trapInstr.data(), 4);
  memcpy(buf + 24, trapInstr.data(), 4);
  memcpy(buf + 28, trapInstr.data(), 4);
}

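// "$a" and "$d" are the mapping symbols defined by the ELF for the Arm
// Architecture: they mark the start of ARM code and of literal data within the
// synthetic PLT so that disassemblers can decode it correctly.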
void ARM::addPltHeaderSymbols(InputSection &isec) const {
  addSyntheticLocal("$a", STT_NOTYPE, 0, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, 16, 0, isec);
}

// Long form PLT entries that do not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltLong(uint8_t *buf, uint64_t gotPltEntryAddr,
                         uint64_t pltEntryAddr) {
  const uint8_t pltData[] = {
      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
      0x00, 0x00, 0x00, 0x00, // L2: .word   Offset(&(.got.plt) - L1 - 8)
  };
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t l1 = pltEntryAddr + 4;
  write32le(buf + 12, gotPltEntryAddr - l1 - 8);
}

// The default PLT entries require the .got.plt to be within 128 MiB of the
// .plt in the positive direction.
void ARM::writePlt(uint8_t *buf, const Symbol &sym,
                   uint64_t pltEntryAddr) const {
  // The PLT entry is similar to the example given in Appendix A of ELF for
  // the Arm Architecture. Instead of using the Group Relocations to find the
  // optimal rotation for the 8-bit immediate used in the add instructions, we
  // hard-code the most compact rotations for simplicity. This saves a load
  // instruction over the long plt sequences.
  const uint32_t pltData[] = {
      0xe28fc600, // L1: add ip, pc,  #0x0NN00000  Offset(&(.got.plt) - L1 - 8)
      0xe28cca00, //     add ip, ip,  #0x000NN000  Offset(&(.got.plt) - L1 - 8)
      0xe5bcf000, //     ldr pc, [ip, #0x00000NNN] Offset(&(.got.plt) - L1 - 8)
  };

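  // The -8 in the offset accounts for the ARM-state PC bias: at L1 the PC
  // reads as L1 + 8, so the final ldr's effective address is exactly the
  // symbol's .got.plt entry.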
  uint64_t offset = sym.getGotPltVA() - pltEntryAddr - 8;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the offset, so use the long form.
    writePltLong(buf, sym.getGotPltVA(), pltEntryAddr);
    return;
  }
  write32le(buf + 0, pltData[0] | ((offset >> 20) & 0xff));
  write32le(buf + 4, pltData[1] | ((offset >> 12) & 0xff));
  write32le(buf + 8, pltData[2] | (offset & 0xfff));
  memcpy(buf + 12, trapInstr.data(), 4); // Pad to 16-byte boundary
}

void ARM::addPltSymbols(InputSection &isec, uint64_t off) const {
  addSyntheticLocal("$a", STT_NOTYPE, off, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, off + 12, 0, isec);
}

bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                     uint64_t branchAddr, const Symbol &s,
                     int64_t a) const {
  // If s is an undefined weak symbol and does not have a PLT entry then it
  // will be resolved as a branch to the next instruction. If it is hidden, its
  // binding has been converted to local, so we just check isUndefined() here.
  // An undefined non-weak symbol will already have been reported as an error.
  if (s.isUndefined() && !s.isInPlt())
    return false;
  // A state change from ARM to Thumb and vice versa must go through an
  // interworking thunk if the relocation type is not R_ARM_CALL or
  // R_ARM_THM_CALL.
  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
    // Source is ARM, all PLT entries are ARM so no interworking is required.
    // Otherwise we need to interwork if the STT_FUNC symbol has bit 0 set
    // (Thumb).
    if (s.isFunc() && expr == R_PC && (s.getVA() & 1))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst + a);
  }
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    // Source is Thumb, all PLT entries are ARM so interworking is required.
    // Otherwise we need to interwork if the STT_FUNC symbol has bit 0 clear
    // (ARM).
    if (expr == R_PLT_PC || (s.isFunc() && (s.getVA() & 1) == 0))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_THM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst + a);
  }
  }
  return false;
}

uint32_t ARM::getThunkSectionSpacing() const {
  // The placing of pre-created ThunkSections is controlled by the value
  // thunkSectionSpacing returned by getThunkSectionSpacing(). The aim is to
  // place the ThunkSection such that all branches from the InputSections
  // prior to the ThunkSection can reach a Thunk placed at the end of the
  // ThunkSection. Graphically:
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |

  // Pre-created ThunkSections are spaced roughly 16MiB apart on ARMv7. This
  // is to match the most common expected case of a Thumb 2 encoded BL, BLX or
  // B.W:
  // ARM B, BL, BLX range +/- 32MiB
  // Thumb B.W, BL, BLX range +/- 16MiB
  // Thumb B<cc>.W range +/- 1MiB
  // If a branch cannot reach a pre-created ThunkSection, a new one will be
  // created so we can handle the rare case of a Thumb 2 conditional branch.
  // We intentionally use a lower size for thunkSectionSpacing than the maximum
  // branch range so the end of the ThunkSection is more likely to be within
  // range of the branch instruction that is furthest away. The value we
  // shorten thunkSectionSpacing by is set conservatively to allow us to create
  // 16,384 12-byte Thunks at any offset in a ThunkSection without risk of a
  // branch to one of the Thunks going out of range.

  // On Arm the thunkSectionSpacing depends on the range of the Thumb branch
  // instructions. On earlier architectures such as ARMv4, ARMv5 and ARMv6
  // (except ARMv6T2) the range is +/- 4MiB.

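  // A quick check on the constants below: 16,384 thunks * 12 bytes = 0x30000
  // (192 KiB), so the ARMv7 spacing is 16 MiB - 192 KiB. The pre-ARMv6T2
  // spacing of 4 MiB - 0x7500 likewise reserves room for 0x7500 / 12 = 2,496
  // thunks.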
  return (config->armJ1J2BranchEncoding) ? 0x1000000 - 0x30000
                                         : 0x400000 - 0x7500;
}

bool ARM::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
  if ((dst & 0x1) == 0)
    // The destination is ARM. If the caller is ARM then src is already 4-byte
    // aligned; if the caller is Thumb (BLX) the bottom 2 bits of src are
    // cleared to ensure the destination will be 4-byte aligned.
    src &= ~0x3;
  else
    // Bit 0 == 1 denotes Thumb state, it is not part of the range.
    dst &= ~0x1;

  int64_t offset = dst - src;
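  // The bit widths below match the ranges listed in getThunkSectionSpacing():
  // a signed 26-bit offset is +/- 32 MiB (ARM B/BL/BLX), 21 bits is +/- 1 MiB
  // (Thumb B<cc>.W), 25 bits is +/- 16 MiB (Thumb-2 B.W/BL/BLX) and 23 bits is
  // +/- 4 MiB (pre-ARMv6T2 Thumb BL).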
  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
  case R_ARM_CALL:
    return llvm::isInt<26>(offset);
  case R_ARM_THM_JUMP19:
    return llvm::isInt<21>(offset);
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return config->armJ1J2BranchEncoding ? llvm::isInt<25>(offset)
                                         : llvm::isInt<23>(offset);
  default:
    return true;
  }
}

// Helper to produce message text when LLD detects that a CALL relocation to
// a non STT_FUNC symbol may result in incorrect interworking between ARM and
// Thumb.
static void stateChangeWarning(uint8_t *loc, RelType relt, const Symbol &s) {
  assert(!s.isFunc());
  const ErrorPlace place = getErrorPlace(loc);
  std::string hint;
  if (!place.srcLoc.empty())
    hint = "; " + place.srcLoc;
  if (s.isSection()) {
    // Section symbols must be defined and in a section. Users cannot change
    // the type. Use the section name as getName() returns an empty string.
    warn(place.loc + "branch and link relocation: " + toString(relt) +
         " to STT_SECTION symbol " + cast<Defined>(s).section->name +
         " ; interworking not performed" + hint);
  } else {
    // Warn with hint on how to alter the symbol type.
    warn(getErrorLocation(loc) + "branch and link relocation: " +
         toString(relt) + " to non STT_FUNC symbol: " + s.getName() +
         " interworking not performed; consider using directive '.type " +
         s.getName() +
         ", %function' to give symbol type STT_FUNC if interworking between "
         "ARM and Thumb is required" +
         hint);
  }
}

// Rotate a 32-bit unsigned value right by a specified number of bits.
static uint32_t rotr32(uint32_t val, uint32_t amt) {
  assert(amt < 32 && "Invalid rotate amount");
  return (val >> amt) | (val << ((32 - amt) & 31));
}

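// A short summary of the scheme behind the helpers below (per ELF for the Arm
// Architecture, not spelled out in this file): the ALU/LDR/LDRS group
// relocations split a displacement across several instructions, where group n
// encodes whatever remains after the 8-bit chunks claimed by groups 0..n-1
// have been removed. A typical, purely illustrative sequence is:
//   add r0, pc, #...    @ R_ARM_ALU_PC_G0_NC
//   add r0, r0, #...    @ R_ARM_ALU_PC_G1_NC
//   ldr r0, [r0, #...]  @ R_ARM_LDR_PC_G2
// getRemAndLZForGroup() returns the remaining value for the requested group
// together with its (even) leading-zero count, from which the callers build
// the rotated immediate.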
static std::pair<uint32_t, uint32_t> getRemAndLZForGroup(unsigned group,
                                                         uint32_t val) {
  uint32_t rem, lz;
  do {
    lz = llvm::countLeadingZeros(val) & ~1;
    rem = val;
    if (lz == 32) // implies rem == 0
      break;
    val &= 0xffffff >> lz;
  } while (group--);
  return {rem, lz};
}

static void encodeAluGroup(uint8_t *loc, const Relocation &rel, uint64_t val,
                           int group, bool check) {
  // ADD/SUB (immediate) add = bit23, sub = bit22
  // The immediate field carries a 12-bit modified immediate, made up of a
  // 4-bit even rotate right and an 8-bit immediate.
  uint32_t opcode = 0x00800000;
  if (val >> 63) {
    opcode = 0x00400000;
    val = -val;
  }
  uint32_t imm, lz;
  std::tie(imm, lz) = getRemAndLZForGroup(group, val);
  uint32_t rot = 0;
  if (lz < 24) {
    imm = rotr32(imm, 24 - lz);
    rot = (lz + 8) << 7;
  }
  if (check && imm > 0xff)
    error(getErrorLocation(loc) + "unencodeable immediate " + Twine(val).str() +
          " for relocation " + toString(rel.type));
  write32le(loc, (read32le(loc) & 0xff3ff000) | opcode | rot | (imm & 0xff));
}

static void encodeLdrGroup(uint8_t *loc, const Relocation &rel, uint64_t val,
                           int group) {
  // R_ARM_LDR_PC_Gn is S + A - P; we have ((S + A) | T) - P. If S is a
  // function then S + A is even, so bit 0 of val only carries T and we can
  // clear it to recover S + A - P.
  if (rel.sym->isFunc())
    val &= ~0x1;
  // LDR (literal) u = bit23
  uint32_t opcode = 0x00800000;
  if (val >> 63) {
    opcode = 0x0;
    val = -val;
  }
  uint32_t imm = getRemAndLZForGroup(group, val).first;
  checkUInt(loc, imm, 12, rel);
  write32le(loc, (read32le(loc) & 0xff7ff000) | opcode | imm);
}

static void encodeLdrsGroup(uint8_t *loc, const Relocation &rel, uint64_t val,
                            int group) {
  // R_ARM_LDRS_PC_Gn is S + A - P; we have ((S + A) | T) - P. If S is a
  // function then S + A is even, so bit 0 of val only carries T and we can
  // clear it to recover S + A - P.
  if (rel.sym->isFunc())
    val &= ~0x1;
  // LDRD/LDRH/LDRSB/LDRSH (literal) u = bit23
  uint32_t opcode = 0x00800000;
  if (val >> 63) {
    opcode = 0x0;
    val = -val;
  }
  uint32_t imm = getRemAndLZForGroup(group, val).first;
  checkUInt(loc, imm, 8, rel);
  write32le(loc, (read32le(loc) & 0xff7ff0f0) | opcode | ((imm & 0xf0) << 4) |
                     (imm & 0xf));
}

void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  switch (rel.type) {
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
  case R_ARM_RELATIVE:
  case R_ARM_SBREL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_LE32:
  case R_ARM_TLS_TPOFF32:
  case R_ARM_TLS_DTPOFF32:
    write32le(loc, val);
    break;
  case R_ARM_PREL31:
    checkInt(loc, val, 31, rel);
    write32le(loc, (read32le(loc) & 0x80000000) | (val & ~0x80000000));
    break;
  case R_ARM_CALL: {
    // R_ARM_CALL is used for BL and BLX instructions. For symbols of type
    // STT_FUNC we choose whether to write a BL or BLX depending on the value
    // of bit 0 of Val, with bit 0 == 1 denoting Thumb. If the symbol is not of
    // type STT_FUNC then we must preserve the original instruction.
    // PLT entries are always ARM state so we know we don't need to interwork.
    assert(rel.sym); // R_ARM_CALL is always reached via relocate().
    bool bit0Thumb = val & 1;
    bool isBlx = (read32le(loc) & 0xfe000000) == 0xfa000000;
    // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
    // even when type not STT_FUNC.
    if (!rel.sym->isFunc() && isBlx != bit0Thumb)
      stateChangeWarning(loc, rel.type, *rel.sym);
    if (rel.sym->isFunc() ? bit0Thumb : isBlx) {
      // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
      checkInt(loc, val, 26, rel);
      write32le(loc, 0xfa000000 |                    // opcode
                         ((val & 2) << 23) |         // H
                         ((val >> 2) & 0x00ffffff)); // imm24
      break;
    }
    // A BLX (always unconditional) instruction to an ARM target: select an
    // unconditional BL.
    write32le(loc, 0xeb000000 | (read32le(loc) & 0x00ffffff));
    // fall through as BL encoding is shared with B
  }
    LLVM_FALLTHROUGH;
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    checkInt(loc, val, 26, rel);
    write32le(loc, (read32le(loc) & ~0x00ffffff) | ((val >> 2) & 0x00ffffff));
    break;
  case R_ARM_THM_JUMP8:
    // We do a 9 bit check because val is right-shifted by 1 bit.
    checkInt(loc, val, 9, rel);
    write16le(loc, (read32le(loc) & 0xff00) | ((val >> 1) & 0x00ff));
    break;
  case R_ARM_THM_JUMP11:
    // We do a 12 bit check because val is right-shifted by 1 bit.
    checkInt(loc, val, 12, rel);
    write16le(loc, (read32le(loc) & 0xf800) | ((val >> 1) & 0x07ff));
    break;
  case R_ARM_THM_JUMP19:
    // Encoding T3: Val = S:J2:J1:imm6:imm11:0
    checkInt(loc, val, 21, rel);
    write16le(loc,
              (read16le(loc) & 0xfbc0) |   // opcode cond
                  ((val >> 10) & 0x0400) | // S
                  ((val >> 12) & 0x003f)); // imm6
    write16le(loc + 2,
              0x8000 |                    // opcode
                  ((val >> 8) & 0x0800) | // J2
                  ((val >> 5) & 0x2000) | // J1
                  ((val >> 1) & 0x07ff)); // imm11
    break;
  case R_ARM_THM_CALL: {
    // R_ARM_THM_CALL is used for BL and BLX instructions. For symbols of type
    // STT_FUNC we choose whether to write a BL or BLX depending on the value
    // of bit 0 of Val, with bit 0 == 0 denoting ARM. If the symbol is not of
    // type STT_FUNC then we must preserve the original instruction.
    // PLT entries are always ARM state so we know we need to interwork.
    assert(rel.sym); // R_ARM_THM_CALL is always reached via relocate().
    bool bit0Thumb = val & 1;
    bool isBlx = (read16le(loc + 2) & 0x1000) == 0;
    // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
    // even when type not STT_FUNC. PLT entries generated by LLD are always ARM.
    if (!rel.sym->isFunc() && !rel.sym->isInPlt() && isBlx == bit0Thumb)
      stateChangeWarning(loc, rel.type, *rel.sym);
    if (rel.sym->isFunc() || rel.sym->isInPlt() ? !bit0Thumb : isBlx) {
      // We are writing a BLX. Ensure the BLX destination is 4-byte aligned, as
      // the BLX instruction itself may be only 2-byte aligned. This must be
      // done before the overflow check.
      val = alignTo(val, 4);
      write16le(loc + 2, read16le(loc + 2) & ~0x1000);
    } else {
      write16le(loc + 2, (read16le(loc + 2) & ~0x1000) | 1 << 12);
    }
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
      checkInt(loc, val, 23, rel);
      write16le(loc,
                0xf000 |                     // opcode
                    ((val >> 12) & 0x07ff)); // imm11
      write16le(loc + 2,
                (read16le(loc + 2) & 0xd000) | // opcode
                    0x2800 |                   // J1 == J2 == 1
                    ((val >> 1) & 0x07ff));    // imm11
      break;
    }
  }
    // Fall through as rest of encoding is the same as B.W
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24:
    // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
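    // The J1/J2 writes below are the inverse of the decode in
    // getImplicitAddend(): since I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S),
    // we emit J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S.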
    checkInt(loc, val, 25, rel);
    write16le(loc,
              0xf000 |                     // opcode
                  ((val >> 14) & 0x0400) | // S
                  ((val >> 12) & 0x03ff)); // imm10
    write16le(loc + 2,
              (read16le(loc + 2) & 0xd000) |                  // opcode
                  (((~(val >> 10)) ^ (val >> 11)) & 0x2000) | // J1
                  (((~(val >> 11)) ^ (val >> 13)) & 0x0800) | // J2
                  ((val >> 1) & 0x07ff));                     // imm11
    break;
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVW_BREL_NC:
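    // In the ARM-state MOVW/MOVT encodings imm16 is split as imm4:imm12, with
    // imm4 in instruction bits [19:16] and imm12 in bits [11:0], hence the
    // 0x000f0fff mask used here and for MOVT below.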
    write32le(loc, (read32le(loc) & ~0x000f0fff) | ((val & 0xf000) << 4) |
                       (val & 0x0fff));
    break;
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVT_PREL:
  case R_ARM_MOVT_BREL:
    write32le(loc, (read32le(loc) & ~0x000f0fff) |
                       (((val >> 16) & 0xf000) << 4) | ((val >> 16) & 0xfff));
    break;
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_MOVT_BREL:
    // Encoding T1: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf2c0 |                     // opcode
                  ((val >> 17) & 0x0400) | // i
                  ((val >> 28) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val >> 12) & 0x7000) |   // imm3
                  ((val >> 16) & 0x00ff));   // imm8
    break;
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVW_BREL_NC:
    // Encoding T3: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf240 |                     // opcode
                  ((val >> 1) & 0x0400) |  // i
                  ((val >> 12) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val << 4) & 0x7000) |    // imm3
                  (val & 0x00ff));           // imm8
    break;
  case R_ARM_ALU_PC_G0:
    encodeAluGroup(loc, rel, val, 0, true);
    break;
  case R_ARM_ALU_PC_G0_NC:
    encodeAluGroup(loc, rel, val, 0, false);
    break;
  case R_ARM_ALU_PC_G1:
    encodeAluGroup(loc, rel, val, 1, true);
    break;
  case R_ARM_ALU_PC_G1_NC:
    encodeAluGroup(loc, rel, val, 1, false);
    break;
  case R_ARM_ALU_PC_G2:
    encodeAluGroup(loc, rel, val, 2, true);
    break;
  case R_ARM_LDR_PC_G0:
    encodeLdrGroup(loc, rel, val, 0);
    break;
  case R_ARM_LDR_PC_G1:
    encodeLdrGroup(loc, rel, val, 1);
    break;
  case R_ARM_LDR_PC_G2:
    encodeLdrGroup(loc, rel, val, 2);
    break;
  case R_ARM_LDRS_PC_G0:
    encodeLdrsGroup(loc, rel, val, 0);
    break;
  case R_ARM_LDRS_PC_G1:
    encodeLdrsGroup(loc, rel, val, 1);
    break;
  case R_ARM_LDRS_PC_G2:
    encodeLdrsGroup(loc, rel, val, 2);
    break;
  case R_ARM_THM_ALU_PREL_11_0: {
    // ADR encoding T2 (sub), T3 (add) i:imm3:imm8
    int64_t imm = val;
    uint16_t sub = 0;
    if (imm < 0) {
      imm = -imm;
      sub = 0x00a0;
    }
    checkUInt(loc, imm, 12, rel);
    write16le(loc, (read16le(loc) & 0xfb0f) | sub | (imm & 0x800) >> 1);
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | (imm & 0x700) << 4 | (imm & 0xff));
    break;
  }
  case R_ARM_THM_PC8:
    // ADR and LDR (literal) encoding T1: positive offset only, imm8:00
    // R_ARM_THM_PC8 is S + A - Pa; we have ((S + A) | T) - Pa. If S is a
    // function then S + A is even and Pa is a multiple of 4, so bit 0 of val
    // only carries T and we can clear it to recover S + A - Pa.
    if (rel.sym->isFunc())
      val &= ~0x1;
    checkUInt(loc, val, 10, rel);
    checkAlignment(loc, val, 4, rel);
    write16le(loc, (read16le(loc) & 0xff00) | (val & 0x3fc) >> 2);
    break;
  case R_ARM_THM_PC12: {
    // LDR (literal) encoding T2, add = (U == '1'), imm12 is unsigned.
    // R_ARM_THM_PC12 is S + A - Pa; we have ((S + A) | T) - Pa. If S is a
    // function then S + A is even and Pa is a multiple of 4, so bit 0 of val
    // only carries T and we can clear it to recover S + A - Pa.
    if (rel.sym->isFunc())
      val &= ~0x1;
    int64_t imm12 = val;
    uint16_t u = 0x0080;
    if (imm12 < 0) {
      imm12 = -imm12;
      u = 0;
    }
    checkUInt(loc, imm12, 12, rel);
    write16le(loc, read16le(loc) | u);
    write16le(loc + 2, (read16le(loc + 2) & 0xf000) | imm12);
    break;
  }
  default:
    llvm_unreachable("unknown relocation");
  }
}

int64_t ARM::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  default:
    internalLinkerError(getErrorLocation(buf),
                        "cannot read addend for relocation " + toString(type));
    return 0;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GLOB_DAT:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_IRELATIVE:
  case R_ARM_REL32:
  case R_ARM_RELATIVE:
  case R_ARM_SBREL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_DTPMOD32:
  case R_ARM_TLS_DTPOFF32:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LE32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_TPOFF32:
    return SignExtend64<32>(read32le(buf));
  case R_ARM_PREL31:
    return SignExtend64<31>(read32le(buf));
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    return SignExtend64<26>(read32le(buf) << 2);
  case R_ARM_THM_JUMP8:
    return SignExtend64<9>(read16le(buf) << 1);
  case R_ARM_THM_JUMP11:
    return SignExtend64<12>(read16le(buf) << 1);
  case R_ARM_THM_JUMP19: {
    // Encoding T3: A = S:J2:J1:imm6:imm11:0
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<20>(((hi & 0x0400) << 10) | // S
                            ((lo & 0x0800) << 8) |  // J2
                            ((lo & 0x2000) << 5) |  // J1
                            ((hi & 0x003f) << 12) | // imm6
                            ((lo & 0x07ff) << 1));  // imm11:0
  }
  case R_ARM_THM_CALL:
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
      uint16_t hi = read16le(buf);
      uint16_t lo = read16le(buf + 2);
      return SignExtend64<22>(((hi & 0x7ff) << 12) | // imm11
                              ((lo & 0x7ff) << 1));  // imm11:0
      break;
    }
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24: {
    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<24>(((hi & 0x0400) << 14) |                    // S
                            (~((lo ^ (hi << 3)) << 10) & 0x00800000) | // I1
                            (~((lo ^ (hi << 1)) << 11) & 0x00400000) | // I2
                            ((hi & 0x003ff) << 12) |                   // imm10
                            ((lo & 0x007ff) << 1)); // imm11:0
  }
  // Per ELF for the ARM Architecture 4.6.1.1, the implicit addend for MOVW and
  // MOVT is in the range -32768 <= A < 32768.
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_MOVW_BREL_NC:
  case R_ARM_MOVT_BREL: {
    uint64_t val = read32le(buf) & 0x000f0fff;
    return SignExtend64<16>(((val & 0x000f0000) >> 4) | (val & 0x00fff));
  }
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_MOVW_BREL_NC:
  case R_ARM_THM_MOVT_BREL: {
    // Encoding T3: A = imm4:i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<16>(((hi & 0x000f) << 12) | // imm4
                            ((hi & 0x0400) << 1) |  // i
                            ((lo & 0x7000) >> 4) |  // imm3
                            (lo & 0x00ff));         // imm8
  }
  case R_ARM_ALU_PC_G0:
  case R_ARM_ALU_PC_G0_NC:
  case R_ARM_ALU_PC_G1:
  case R_ARM_ALU_PC_G1_NC:
  case R_ARM_ALU_PC_G2: {
    // 12-bit immediate is a modified immediate made up of a 4-bit even
    // right rotation and 8-bit constant. After the rotation the value
    // is zero-extended. When bit 23 is set the instruction is an add, when
    // bit 22 is set it is a sub.
    uint32_t instr = read32le(buf);
    uint32_t val = rotr32(instr & 0xff, ((instr & 0xf00) >> 8) * 2);
    return (instr & 0x00400000) ? -val : val;
  }
  case R_ARM_LDR_PC_G0:
  case R_ARM_LDR_PC_G1:
  case R_ARM_LDR_PC_G2: {
    // ADR (literal) add = bit23, sub = bit22
    // LDR (literal) u = bit23 unsigned imm12
    bool u = read32le(buf) & 0x00800000;
    uint32_t imm12 = read32le(buf) & 0xfff;
    return u ? imm12 : -imm12;
  }
  case R_ARM_LDRS_PC_G0:
  case R_ARM_LDRS_PC_G1:
  case R_ARM_LDRS_PC_G2: {
    // LDRD/LDRH/LDRSB/LDRSH (literal) u = bit23 unsigned imm8
    uint32_t opcode = read32le(buf);
    bool u = opcode & 0x00800000;
    uint32_t imm4l = opcode & 0xf;
    uint32_t imm4h = (opcode & 0xf00) >> 4;
    return u ? (imm4h | imm4l) : -(imm4h | imm4l);
  }
  case R_ARM_THM_ALU_PREL_11_0: {
    // Thumb2 ADR, which is an alias for a sub or add instruction with an
    // unsigned immediate.
    // ADR encoding T2 (sub), T3 (add) i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    uint64_t imm = (hi & 0x0400) << 1 | // i
                   (lo & 0x7000) >> 4 | // imm3
                   (lo & 0x00ff);       // imm8
    // For sub, addend is negative, add is positive.
    return (hi & 0x00f0) ? -imm : imm;
  }
  case R_ARM_THM_PC8:
    // ADR and LDR (literal) encoding T1
    // From ELF for the ARM Architecture the initial signed addend is formed
    // from an unsigned field using expression (((imm8:00 + 4) & 0x3ff) - 4)
    // this trick permits the PC bias of -4 to be encoded using imm8 = 0xff
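    // For example, imm8 == 0xff gives (((0x3fc + 4) & 0x3ff) - 4) == -4.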
    return ((((read16le(buf) & 0xff) << 2) + 4) & 0x3ff) - 4;
  case R_ARM_THM_PC12: {
    // LDR (literal) encoding T2, add = (U == '1') imm12
    bool u = read16le(buf) & 0x0080;
    uint64_t imm12 = read16le(buf + 2) & 0x0fff;
    return u ? imm12 : -imm12;
  }
  case R_ARM_NONE:
  case R_ARM_V4BX:
  case R_ARM_JUMP_SLOT:
    // These relocations are defined as not having an implicit addend.
    return 0;
  }
}

TargetInfo *elf::getARMTargetInfo() {
  static ARM target;
  return &target;
}