xref: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp (revision 36b606ae6aa4b24061096ba18582e0a08ccd5dba)
1 //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/ARMAsmBackend.h"
10 #include "MCTargetDesc/ARMAddressingModes.h"
11 #include "MCTargetDesc/ARMAsmBackendDarwin.h"
12 #include "MCTargetDesc/ARMAsmBackendELF.h"
13 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
14 #include "MCTargetDesc/ARMFixupKinds.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/BinaryFormat/ELF.h"
18 #include "llvm/BinaryFormat/MachO.h"
19 #include "llvm/MC/MCAsmBackend.h"
20 #include "llvm/MC/MCAssembler.h"
21 #include "llvm/MC/MCContext.h"
22 #include "llvm/MC/MCDirectives.h"
23 #include "llvm/MC/MCELFObjectWriter.h"
24 #include "llvm/MC/MCExpr.h"
25 #include "llvm/MC/MCFixupKindInfo.h"
26 #include "llvm/MC/MCObjectWriter.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCSectionELF.h"
29 #include "llvm/MC/MCSectionMachO.h"
30 #include "llvm/MC/MCSubtargetInfo.h"
31 #include "llvm/MC/MCTargetOptions.h"
32 #include "llvm/MC/MCValue.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/EndianStream.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/Format.h"
37 #include "llvm/Support/raw_ostream.h"
38 using namespace llvm;
39 
40 namespace {
41 class ARMELFObjectWriter : public MCELFObjectTargetWriter {
42 public:
43   ARMELFObjectWriter(uint8_t OSABI)
44       : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
45                                 /*HasRelocationAddend*/ false) {}
46 };
47 } // end anonymous namespace
48 
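// Note: the generic backend recognizes no target-specific fixup names for the
// .reloc directive; the ELF subclass below maps relocation names (plus a few
// BFD_RELOC_* aliases) onto literal relocation kinds.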
49 std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
50   return std::nullopt;
51 }
52 
53 std::optional<MCFixupKind>
54 ARMAsmBackendELF::getFixupKind(StringRef Name) const {
55   unsigned Type = llvm::StringSwitch<unsigned>(Name)
56 #define ELF_RELOC(X, Y) .Case(#X, Y)
57 #include "llvm/BinaryFormat/ELFRelocs/ARM.def"
58 #undef ELF_RELOC
59                       .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE)
60                       .Case("BFD_RELOC_8", ELF::R_ARM_ABS8)
61                       .Case("BFD_RELOC_16", ELF::R_ARM_ABS16)
62                       .Case("BFD_RELOC_32", ELF::R_ARM_ABS32)
63                       .Default(-1u);
64   if (Type == -1u)
65     return std::nullopt;
66   return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
67 }
68 
69 const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
70   const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
71       // This table *must* be in the order that the fixup_* kinds are defined in
72       // ARMFixupKinds.h.
73       //
74       // Name                      Offset (bits) Size (bits)     Flags
75       {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
76       {"fixup_t2_ldst_pcrel_12", 0, 32,
77        MCFixupKindInfo::FKF_IsPCRel |
78            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
79       {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
80       {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
81       {"fixup_t2_pcrel_10", 0, 32,
82        MCFixupKindInfo::FKF_IsPCRel |
83            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
84       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
85       {"fixup_t2_pcrel_9", 0, 32,
86        MCFixupKindInfo::FKF_IsPCRel |
87            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
88       {"fixup_arm_ldst_abs_12", 0, 32, 0},
89       {"fixup_thumb_adr_pcrel_10", 0, 8,
90        MCFixupKindInfo::FKF_IsPCRel |
91            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
92       {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
93       {"fixup_t2_adr_pcrel_12", 0, 32,
94        MCFixupKindInfo::FKF_IsPCRel |
95            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
96       {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
97       {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
98       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
99       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
100       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
101       {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
102       {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
103       {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
104       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
105       {"fixup_arm_thumb_blx", 0, 32,
106        MCFixupKindInfo::FKF_IsPCRel |
107            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
108       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
109       {"fixup_arm_thumb_cp", 0, 8,
110        MCFixupKindInfo::FKF_IsPCRel |
111            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
112       {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
113       // movw / movt: a 16-bit immediate scattered into two chunks, bits 0 - 11
114       // and bits 16 - 19.
115       {"fixup_arm_movt_hi16", 0, 20, 0},
116       {"fixup_arm_movw_lo16", 0, 20, 0},
117       {"fixup_t2_movt_hi16", 0, 20, 0},
118       {"fixup_t2_movw_lo16", 0, 20, 0},
119       {"fixup_arm_thumb_upper_8_15", 0, 8, 0},
120       {"fixup_arm_thumb_upper_0_7", 0, 8, 0},
121       {"fixup_arm_thumb_lower_8_15", 0, 8, 0},
122       {"fixup_arm_thumb_lower_0_7", 0, 8, 0},
123       {"fixup_arm_mod_imm", 0, 12, 0},
124       {"fixup_t2_so_imm", 0, 26, 0},
125       {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
126       {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
127       {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
128       {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
129       {"fixup_bfcsel_else_target", 0, 32, 0},
130       {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
131       {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
132   const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
133       // This table *must* be in the order that the fixup_* kinds are defined in
134       // ARMFixupKinds.h.
135       //
136       // Name                      Offset (bits) Size (bits)     Flags
137       {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
138       {"fixup_t2_ldst_pcrel_12", 0, 32,
139        MCFixupKindInfo::FKF_IsPCRel |
140            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
141       {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
142       {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
143       {"fixup_t2_pcrel_10", 0, 32,
144        MCFixupKindInfo::FKF_IsPCRel |
145            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
146       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
147       {"fixup_t2_pcrel_9", 0, 32,
148        MCFixupKindInfo::FKF_IsPCRel |
149            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
150       {"fixup_arm_ldst_abs_12", 0, 32, 0},
151       {"fixup_thumb_adr_pcrel_10", 8, 8,
152        MCFixupKindInfo::FKF_IsPCRel |
153            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
154       {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
155       {"fixup_t2_adr_pcrel_12", 0, 32,
156        MCFixupKindInfo::FKF_IsPCRel |
157            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
158       {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
159       {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
160       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
161       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
162       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
163       {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
164       {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
165       {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
166       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
167       {"fixup_arm_thumb_blx", 0, 32,
168        MCFixupKindInfo::FKF_IsPCRel |
169            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
170       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
171       {"fixup_arm_thumb_cp", 8, 8,
172        MCFixupKindInfo::FKF_IsPCRel |
173            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
174       {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
175       // movw / movt: a 16-bit immediate scattered into two chunks, bits 0 - 11
176       // and bits 16 - 19.
177       {"fixup_arm_movt_hi16", 12, 20, 0},
178       {"fixup_arm_movw_lo16", 12, 20, 0},
179       {"fixup_t2_movt_hi16", 12, 20, 0},
180       {"fixup_t2_movw_lo16", 12, 20, 0},
181       {"fixup_arm_thumb_upper_8_15", 24, 8, 0},
182       {"fixup_arm_thumb_upper_0_7", 24, 8, 0},
183       {"fixup_arm_thumb_lower_8_15", 24, 8, 0},
184       {"fixup_arm_thumb_lower_0_7", 24, 8, 0},
185       {"fixup_arm_mod_imm", 20, 12, 0},
186       {"fixup_t2_so_imm", 26, 6, 0},
187       {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
188       {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
189       {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
190       {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
191       {"fixup_bfcsel_else_target", 0, 32, 0},
192       {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
193       {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
194 
195   // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
196   // any extra processing.
197   if (Kind >= FirstLiteralRelocationKind)
198     return MCAsmBackend::getFixupKindInfo(FK_NONE);
199 
200   if (Kind < FirstTargetFixupKind)
201     return MCAsmBackend::getFixupKindInfo(Kind);
202 
203   assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
204          "Invalid kind!");
205   return (Endian == llvm::endianness::little
206               ? InfosLE
207               : InfosBE)[Kind - FirstTargetFixupKind];
208 }
209 
210 void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
211   switch (Flag) {
212   default:
213     break;
214   case MCAF_Code16:
215     setIsThumb(true);
216     break;
217   case MCAF_Code32:
218     setIsThumb(false);
219     break;
220   }
221 }
222 
223 unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
224                                          const MCSubtargetInfo &STI) const {
225   bool HasThumb2 = STI.hasFeature(ARM::FeatureThumb2);
226   bool HasV8MBaselineOps = STI.hasFeature(ARM::HasV8MBaselineOps);
227 
228   switch (Op) {
229   default:
230     return Op;
231   case ARM::tBcc:
232     return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
233   case ARM::tLDRpci:
234     return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
235   case ARM::tADR:
236     return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
237   case ARM::tB:
238     return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
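  // tCBZ/tCBNZ have no wide form; when the target turns out to be unencodable
  // (i.e. the very next instruction) they are rewritten as a NOP (tHINT) in
  // relaxInstruction() below.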
239   case ARM::tCBZ:
240     return ARM::tHINT;
241   case ARM::tCBNZ:
242     return ARM::tHINT;
243   }
244 }
245 
246 bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
247                                       const MCSubtargetInfo &STI) const {
248   if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
249     return true;
250   return false;
251 }
252 
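// Shared range check for the Thumb-2 low-overhead-loop and branch-future
// fixups: the raw fixup value is measured from the start of the instruction,
// so the implicit +4 PC offset is subtracted before comparing against
// [Min, Max].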
253 static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
254   int64_t Offset = int64_t(Value) - 4;
255   if (Offset < Min || Offset > Max)
256     return "out of range pc-relative fixup value";
257   return nullptr;
258 }
259 
260 const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
261                                                     uint64_t Value) const {
262   switch (Fixup.getTargetKind()) {
263   case ARM::fixup_arm_thumb_br: {
264     // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
265     // low bit being an implied zero. There's an implied +4 offset for the
266     // branch, so we adjust the other way here to determine what's
267     // encodable.
268     //
269     // Relax if the value does not fit in the signed 12-bit displacement.
270     int64_t Offset = int64_t(Value) - 4;
271     if (Offset > 2046 || Offset < -2048)
272       return "out of range pc-relative fixup value";
273     break;
274   }
275   case ARM::fixup_arm_thumb_bcc: {
276     // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
277     // low bit being an implied zero. There's an implied +4 offset for the
278     // branch, so we adjust the other way here to determine what's
279     // encodable.
280     //
281     // Relax if the value does not fit in the signed 9-bit displacement.
282     int64_t Offset = int64_t(Value) - 4;
283     if (Offset > 254 || Offset < -256)
284       return "out of range pc-relative fixup value";
285     break;
286   }
287   case ARM::fixup_thumb_adr_pcrel_10:
288   case ARM::fixup_arm_thumb_cp: {
289     // If the immediate is negative, greater than 1020, or not a multiple
290     // of four, the wide version of the instruction must be used.
291     int64_t Offset = int64_t(Value) - 4;
292     if (Offset & 3)
293       return "misaligned pc-relative fixup value";
294     else if (Offset > 1020 || Offset < 0)
295       return "out of range pc-relative fixup value";
296     break;
297   }
298   case ARM::fixup_arm_thumb_cb: {
299     // If we have a Thumb CBZ or CBNZ instruction and its target is the next
300     // instruction, it is actually out of range for the instruction;
301     // it will be changed to a NOP.
302     int64_t Offset = (Value & ~1);
303     if (Offset == 2)
304       return "will be converted to nop";
305     break;
306   }
307   case ARM::fixup_bf_branch:
308     return checkPCRelOffset(Value, 0, 30);
309   case ARM::fixup_bf_target:
310     return checkPCRelOffset(Value, -0x10000, +0xfffe);
311   case ARM::fixup_bfl_target:
312     return checkPCRelOffset(Value, -0x40000, +0x3fffe);
313   case ARM::fixup_bfc_target:
314     return checkPCRelOffset(Value, -0x1000, +0xffe);
315   case ARM::fixup_wls:
316     return checkPCRelOffset(Value, 0, +0xffe);
317   case ARM::fixup_le:
318     // The offset field in the LE and LETP instructions is an 11-bit
319     // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
320     // interpreted as a negative offset from the value read from pc,
321     // i.e. from instruction_address+4.
322     //
323     // So an LE instruction can in principle address the instruction
324     // immediately after itself, or (not very usefully) the address
325     // half way through the 4-byte LE.
326     return checkPCRelOffset(Value, -0xffe, 0);
327   case ARM::fixup_bfcsel_else_target: {
328     if (Value != 2 && Value != 4)
329       return "out of range label-relative fixup value";
330     break;
331   }
332 
333   default:
334     llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
335   }
336   return nullptr;
337 }
338 
339 bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
340                                          uint64_t Value) const {
341   return reasonForFixupRelaxation(Fixup, Value);
342 }
343 
344 void ARMAsmBackend::relaxInstruction(MCInst &Inst,
345                                      const MCSubtargetInfo &STI) const {
346   unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);
347 
348   // Report a fatal error if we get here with an instruction we don't know how to relax.
349   if (RelaxedOp == Inst.getOpcode()) {
350     SmallString<256> Tmp;
351     raw_svector_ostream OS(Tmp);
352     Inst.dump_pretty(OS);
353     OS << "\n";
354     report_fatal_error("unexpected instruction to relax: " + OS.str());
355   }
356 
357   // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
358   // have to change the operands too.
359   if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
360       RelaxedOp == ARM::tHINT) {
361     MCInst Res;
362     Res.setOpcode(RelaxedOp);
363     Res.addOperand(MCOperand::createImm(0));
364     Res.addOperand(MCOperand::createImm(14));
365     Res.addOperand(MCOperand::createReg(0));
366     Inst = std::move(Res);
367     return;
368   }
369 
370   // The rest of instructions we're relaxing have the same operands.
371   // We just need to update to the proper opcode.
372   Inst.setOpcode(RelaxedOp);
373 }
374 
375 bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
376                                  const MCSubtargetInfo *STI) const {
377   const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
378   const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
379   const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
380   const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
381   if (isThumb()) {
382     const uint16_t nopEncoding =
383         hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
384     uint64_t NumNops = Count / 2;
385     for (uint64_t i = 0; i != NumNops; ++i)
386       support::endian::write(OS, nopEncoding, Endian);
387     if (Count & 1)
388       OS << '\0';
389     return true;
390   }
391   // ARM mode
392   const uint32_t nopEncoding =
393       hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
394   uint64_t NumNops = Count / 4;
395   for (uint64_t i = 0; i != NumNops; ++i)
396     support::endian::write(OS, nopEncoding, Endian);
397   // FIXME: should this function return false when unable to write exactly
398   // 'Count' bytes with NOP encodings?
399   switch (Count % 4) {
400   default:
401     break; // No leftover bytes to write
402   case 1:
403     OS << '\0';
404     break;
405   case 2:
406     OS.write("\0\0", 2);
407     break;
408   case 3:
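    // Presumably the first three bytes of a little-endian ARM NOP
    // ("mov r0, r0", 0xe1a00000).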
409     OS.write("\0\0\xa0", 3);
410     break;
411   }
412 
413   return true;
414 }
415 
416 static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
417   if (IsLittleEndian) {
418     // Note that the halfwords are stored high first and low second in thumb;
419     // so we need to swap the fixup value here to map properly.
420     uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
421     Swapped |= (Value & 0x0000FFFF) << 16;
422     return Swapped;
423   } else
424     return Value;
425 }
426 
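// Combine the two 16-bit halves of a 32-bit Thumb2 instruction into the byte
// order in which they are stored in memory (first halfword first).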
427 static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
428                               bool IsLittleEndian) {
429   uint32_t Value;
430 
431   if (IsLittleEndian) {
432     Value = (SecondHalf & 0xFFFF) << 16;
433     Value |= (FirstHalf & 0xFFFF);
434   } else {
435     Value = (SecondHalf & 0xFFFF);
436     Value |= (FirstHalf & 0xFFFF) << 16;
437   }
438 
439   return Value;
440 }
441 
442 unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
443                                          const MCFixup &Fixup,
444                                          const MCValue &Target, uint64_t Value,
445                                          bool IsResolved, MCContext &Ctx,
446                                          const MCSubtargetInfo* STI) const {
447   unsigned Kind = Fixup.getKind();
448 
449   // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
450   // and .word relocations they put the Thumb bit into the addend if possible.
451   // Other relocation types don't want this bit though (branches couldn't encode
452   // it if it *was* present, and no other relocations exist) and it can
453   // interfere with checking valid expressions.
454   if (const MCSymbolRefExpr *A = Target.getSymA()) {
455     if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
456         A->getSymbol().isExternal() &&
457         (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
458          Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
459          Kind == ARM::fixup_t2_movt_hi16))
460       Value |= 1;
461   }
462 
463   switch (Kind) {
464   default:
465     return 0;
466   case FK_Data_1:
467   case FK_Data_2:
468   case FK_Data_4:
469     return Value;
470   case FK_SecRel_2:
471     return Value;
472   case FK_SecRel_4:
473     return Value;
474   case ARM::fixup_arm_movt_hi16:
475     assert(STI != nullptr);
476     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
477       Value >>= 16;
478     [[fallthrough]];
479   case ARM::fixup_arm_movw_lo16: {
480     unsigned Hi4 = (Value & 0xF000) >> 12;
481     unsigned Lo12 = Value & 0x0FFF;
482     // inst{19-16} = Hi4;
483     // inst{11-0} = Lo12;
484     Value = (Hi4 << 16) | (Lo12);
485     return Value;
486   }
487   case ARM::fixup_t2_movt_hi16:
488     assert(STI != nullptr);
489     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
490       Value >>= 16;
491     [[fallthrough]];
492   case ARM::fixup_t2_movw_lo16: {
493     unsigned Hi4 = (Value & 0xF000) >> 12;
494     unsigned i = (Value & 0x800) >> 11;
495     unsigned Mid3 = (Value & 0x700) >> 8;
496     unsigned Lo8 = Value & 0x0FF;
497     // inst{19-16} = Hi4;
498     // inst{26} = i;
499     // inst{14-12} = Mid3;
500     // inst{7-0} = Lo8;
501     Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
502     return swapHalfWords(Value, Endian == llvm::endianness::little);
503   }
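  // The following four fixups each select one byte of a 32-bit value, for the
  // Thumb1 :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7: operators
  // (presumably corresponding to the R_ARM_THM_ALU_ABS_G* relocations when
  // left to the linker).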
504   case ARM::fixup_arm_thumb_upper_8_15:
505     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
506       return (Value & 0xff000000) >> 24;
507     return Value & 0xff;
508   case ARM::fixup_arm_thumb_upper_0_7:
509     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
510       return (Value & 0x00ff0000) >> 16;
511     return Value & 0xff;
512   case ARM::fixup_arm_thumb_lower_8_15:
513     if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
514       return (Value & 0x0000ff00) >> 8;
515     return Value & 0xff;
516   case ARM::fixup_arm_thumb_lower_0_7:
517     return Value & 0x000000ff;
518   case ARM::fixup_arm_ldst_pcrel_12:
519     // ARM PC-relative values are offset by 8.
520     Value -= 4;
521     [[fallthrough]];
522   case ARM::fixup_t2_ldst_pcrel_12:
523     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
524     Value -= 4;
525     [[fallthrough]];
526   case ARM::fixup_arm_ldst_abs_12: {
527     bool isAdd = true;
528     if ((int64_t)Value < 0) {
529       Value = -Value;
530       isAdd = false;
531     }
532     if (Value >= 4096) {
533       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
534       return 0;
535     }
536     Value |= isAdd << 23;
537 
538     // Same addressing mode as fixup_arm_pcrel_10,
539     // but with 16-bit halfwords swapped.
540     if (Kind == ARM::fixup_t2_ldst_pcrel_12)
541       return swapHalfWords(Value, Endian == llvm::endianness::little);
542 
543     return Value;
544   }
545   case ARM::fixup_arm_adr_pcrel_12: {
546     // ARM PC-relative values are offset by 8.
547     Value -= 8;
548     unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
549     if ((int64_t)Value < 0) {
550       Value = -Value;
551       opc = 2; // 0b0010
552     }
553     if (ARM_AM::getSOImmVal(Value) == -1) {
554       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
555       return 0;
556     }
557     // Encode the immediate and shift the opcode into place.
558     return ARM_AM::getSOImmVal(Value) | (opc << 21);
559   }
560 
561   case ARM::fixup_t2_adr_pcrel_12: {
562     Value -= 4;
563     unsigned opc = 0;
564     if ((int64_t)Value < 0) {
565       Value = -Value;
566       opc = 5;
567     }
568 
569     uint32_t out = (opc << 21);
570     out |= (Value & 0x800) << 15;
571     out |= (Value & 0x700) << 4;
572     out |= (Value & 0x0FF);
573 
574     return swapHalfWords(out, Endian == llvm::endianness::little);
575   }
576 
577   case ARM::fixup_arm_condbranch:
578   case ARM::fixup_arm_uncondbranch:
579   case ARM::fixup_arm_uncondbl:
580   case ARM::fixup_arm_condbl:
581   case ARM::fixup_arm_blx:
582     // These values don't encode the low two bits since they're always zero.
583     // Offset by 8 just as above.
584     if (const MCSymbolRefExpr *SRE =
585             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
586       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
587         return 0;
588     return 0xffffff & ((Value - 8) >> 2);
589   case ARM::fixup_t2_uncondbranch: {
590     Value = Value - 4;
591     if (!isInt<25>(Value)) {
592       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
593       return 0;
594     }
595 
596     Value >>= 1; // Low bit is not encoded.
597 
598     uint32_t out = 0;
599     bool I = Value & 0x800000;
600     bool J1 = Value & 0x400000;
601     bool J2 = Value & 0x200000;
602     J1 ^= I;
603     J2 ^= I;
604 
605     out |= I << 26;                 // S bit
606     out |= !J1 << 13;               // J1 bit
607     out |= !J2 << 11;               // J2 bit
608     out |= (Value & 0x1FF800) << 5; // imm10 field
609     out |= (Value & 0x0007FF);      // imm11 field
610 
611     return swapHalfWords(out, Endian == llvm::endianness::little);
612   }
613   case ARM::fixup_t2_condbranch: {
614     Value = Value - 4;
615     if (!isInt<21>(Value)) {
616       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
617       return 0;
618     }
619 
620     Value >>= 1; // Low bit is not encoded.
621 
622     uint64_t out = 0;
623     out |= (Value & 0x80000) << 7; // S bit
624     out |= (Value & 0x40000) >> 7; // J2 bit
625     out |= (Value & 0x20000) >> 4; // J1 bit
626     out |= (Value & 0x1F800) << 5; // imm6 field
627     out |= (Value & 0x007FF);      // imm11 field
628 
629     return swapHalfWords(out, Endian == llvm::endianness::little);
630   }
631   case ARM::fixup_arm_thumb_bl: {
632     if (!isInt<25>(Value - 4) ||
633         (!STI->hasFeature(ARM::FeatureThumb2) &&
634          !STI->hasFeature(ARM::HasV8MBaselineOps) &&
635          !STI->hasFeature(ARM::HasV6MOps) &&
636          !isInt<23>(Value - 4))) {
637       Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
638       return 0;
639     }
640 
641     // The value doesn't encode the low bit (always zero) and is offset by
642     // four. The 32-bit immediate value is encoded as
643     //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
644     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
645     // The value is encoded into disjoint bit positions in the destination
646     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
647     // J = either J1 or J2 bit
648     //
649     //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
650     //
651     // Note that the halfwords are stored high first, low second; so we need
652     // to transpose the fixup value here to map properly.
653     uint32_t offset = (Value - 4) >> 1;
654     uint32_t signBit = (offset & 0x800000) >> 23;
655     uint32_t I1Bit = (offset & 0x400000) >> 22;
656     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
657     uint32_t I2Bit = (offset & 0x200000) >> 21;
658     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
659     uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
660     uint32_t imm11Bits = (offset & 0x000007FF);
661 
662     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
663     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
664                            (uint16_t)imm11Bits);
665     return joinHalfWords(FirstHalf, SecondHalf,
666                          Endian == llvm::endianness::little);
667   }
668   case ARM::fixup_arm_thumb_blx: {
669     // The value doesn't encode the low two bits (always zero) and is offset by
670     // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
671     //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
672     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
673     // The value is encoded into disjoint bit positions in the destination
674     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
675     // J = either J1 or J2 bit, 0 = zero.
676     //
677     //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
678     //
679     // Note that the halfwords are stored high first, low second; so we need
680     // to transpose the fixup value here to map properly.
681     if (Value % 4 != 0) {
682       Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
683       return 0;
684     }
685 
686     uint32_t offset = (Value - 4) >> 2;
687     if (const MCSymbolRefExpr *SRE =
688             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
689       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
690         offset = 0;
691     uint32_t signBit = (offset & 0x400000) >> 22;
692     uint32_t I1Bit = (offset & 0x200000) >> 21;
693     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
694     uint32_t I2Bit = (offset & 0x100000) >> 20;
695     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
696     uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
697     uint32_t imm10LBits = (offset & 0x3FF);
698 
699     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
700     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
701                            ((uint16_t)imm10LBits) << 1);
702     return joinHalfWords(FirstHalf, SecondHalf,
703                          Endian == llvm::endianness::little);
704   }
705   case ARM::fixup_thumb_adr_pcrel_10:
706   case ARM::fixup_arm_thumb_cp:
707     // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
708     // could have an error on our hands.
709     assert(STI != nullptr);
710     if (!STI->hasFeature(ARM::FeatureThumb2) && IsResolved) {
711       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
712       if (FixupDiagnostic) {
713         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
714         return 0;
715       }
716     }
717     // Offset by 4, and don't encode the low two bits.
718     return ((Value - 4) >> 2) & 0xff;
719   case ARM::fixup_arm_thumb_cb: {
720     // CB instructions can only branch to offsets in [4, 126] in multiples of 2
721     // so ensure that the raw value LSB is zero and it lies in [2, 130].
722     // An offset of 2 will be relaxed to a NOP.
723     if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
724       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
725       return 0;
726     }
727     // Offset by 4 and don't encode the lower bit, which is always 0.
728     // FIXME: diagnose if no Thumb2
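    // CB{N}Z encodes the offset as i:imm5, with i in instruction bit 9 and
    // imm5 in bits 7:3.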
729     uint32_t Binary = (Value - 4) >> 1;
730     return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
731   }
732   case ARM::fixup_arm_thumb_br:
733     // Offset by 4 and don't encode the lower bit, which is always 0.
734     assert(STI != nullptr);
735     if (!STI->hasFeature(ARM::FeatureThumb2) &&
736         !STI->hasFeature(ARM::HasV8MBaselineOps)) {
737       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
738       if (FixupDiagnostic) {
739         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
740         return 0;
741       }
742     }
743     return ((Value - 4) >> 1) & 0x7ff;
744   case ARM::fixup_arm_thumb_bcc:
745     // Offset by 4 and don't encode the lower bit, which is always 0.
746     assert(STI != nullptr);
747     if (!STI->hasFeature(ARM::FeatureThumb2)) {
748       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
749       if (FixupDiagnostic) {
750         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
751         return 0;
752       }
753     }
754     return ((Value - 4) >> 1) & 0xff;
755   case ARM::fixup_arm_pcrel_10_unscaled: {
756     Value = Value - 8; // ARM fixups offset by an additional word and don't
757                        // need to adjust for the half-word ordering.
758     bool isAdd = true;
759     if ((int64_t)Value < 0) {
760       Value = -Value;
761       isAdd = false;
762     }
763     // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
764     if (Value >= 256) {
765       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
766       return 0;
767     }
768     Value = (Value & 0xf) | ((Value & 0xf0) << 4);
769     return Value | (isAdd << 23);
770   }
771   case ARM::fixup_arm_pcrel_10:
772     Value = Value - 4; // ARM fixups offset by an additional word and don't
773                        // need to adjust for the half-word ordering.
774     [[fallthrough]];
775   case ARM::fixup_t2_pcrel_10: {
776     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
777     Value = Value - 4;
778     bool isAdd = true;
779     if ((int64_t)Value < 0) {
780       Value = -Value;
781       isAdd = false;
782     }
783     // These values don't encode the low two bits since they're always zero.
784     Value >>= 2;
785     if (Value >= 256) {
786       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
787       return 0;
788     }
789     Value |= isAdd << 23;
790 
791     // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
792     // swapped.
793     if (Kind == ARM::fixup_t2_pcrel_10)
794       return swapHalfWords(Value, Endian == llvm::endianness::little);
795 
796     return Value;
797   }
798   case ARM::fixup_arm_pcrel_9:
799     Value = Value - 4; // ARM fixups offset by an additional word and don't
800                        // need to adjust for the half-word ordering.
801     [[fallthrough]];
802   case ARM::fixup_t2_pcrel_9: {
803     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
804     Value = Value - 4;
805     bool isAdd = true;
806     if ((int64_t)Value < 0) {
807       Value = -Value;
808       isAdd = false;
809     }
810     // These values don't encode the low bit since it's always zero.
811     if (Value & 1) {
812       Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
813       return 0;
814     }
815     Value >>= 1;
816     if (Value >= 256) {
817       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
818       return 0;
819     }
820     Value |= isAdd << 23;
821 
822     // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
823     // swapped.
824     if (Kind == ARM::fixup_t2_pcrel_9)
825       return swapHalfWords(Value, Endian == llvm::endianness::little);
826 
827     return Value;
828   }
829   case ARM::fixup_arm_mod_imm:
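    // getSOImmVal returns the rot:imm8 "modified immediate" encoding, or -1 if
    // Value cannot be expressed as a rotated 8-bit constant; the >> 12 test
    // catches that failure, since any valid encoding fits in 12 bits.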
830     Value = ARM_AM::getSOImmVal(Value);
831     if (Value >> 12) {
832       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
833       return 0;
834     }
835     return Value;
836   case ARM::fixup_t2_so_imm: {
837     Value = ARM_AM::getT2SOImmVal(Value);
838     if ((int64_t)Value < 0) {
839       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
840       return 0;
841     }
842     // Value will contain a 12-bit value broken up into a 4-bit shift in bits
843     // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
844     // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
845     // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
846     // half-word.
847     uint64_t EncValue = 0;
848     EncValue |= (Value & 0x800) << 15;
849     EncValue |= (Value & 0x700) << 4;
850     EncValue |= (Value & 0xff);
851     return swapHalfWords(EncValue, Endian == llvm::endianness::little);
852   }
853   case ARM::fixup_bf_branch: {
854     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
855     if (FixupDiagnostic) {
856       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
857       return 0;
858     }
859     uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
860     return swapHalfWords(out, Endian == llvm::endianness::little);
861   }
862   case ARM::fixup_bf_target:
863   case ARM::fixup_bfl_target:
864   case ARM::fixup_bfc_target: {
865     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
866     if (FixupDiagnostic) {
867       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
868       return 0;
869     }
870     uint32_t out = 0;
871     uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
872                             Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
873     out |= (((Value - 4) >> 1) & 0x1) << 11;
874     out |= (((Value - 4) >> 1) & 0x7fe);
875     out |= (((Value - 4) >> 1) & HighBitMask) << 5;
876     return swapHalfWords(out, Endian == llvm::endianness::little);
877   }
878   case ARM::fixup_bfcsel_else_target: {
879     // If this is a fixup of a branch future's else target then it should be a
880     // constant MCExpr representing the distance between the branch's target
881     // and the instruction after that same branch.
882     Value = Target.getConstant();
883 
884     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
885     if (FixupDiagnostic) {
886       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
887       return 0;
888     }
889     uint32_t out = ((Value >> 2) & 1) << 17;
890     return swapHalfWords(out, Endian == llvm::endianness::little);
891   }
892   case ARM::fixup_wls:
893   case ARM::fixup_le: {
894     const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
895     if (FixupDiagnostic) {
896       Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
897       return 0;
898     }
899     uint64_t real_value = Value - 4;
900     uint32_t out = 0;
901     if (Kind == ARM::fixup_le)
902       real_value = -real_value;
903     out |= ((real_value >> 1) & 0x1) << 11;
904     out |= ((real_value >> 1) & 0x7fe);
905     return swapHalfWords(out, Endian == llvm::endianness::little);
906   }
907   }
908 }
909 
910 bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
911                                           const MCFixup &Fixup,
912                                           const MCValue &Target,
913                                           const MCSubtargetInfo *STI) {
914   const MCSymbolRefExpr *A = Target.getSymA();
915   const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
916   const unsigned FixupKind = Fixup.getKind();
917   if (FixupKind >= FirstLiteralRelocationKind)
918     return true;
919   if (FixupKind == ARM::fixup_arm_thumb_bl) {
920     assert(Sym && "How did we resolve this?");
921 
922     // If the symbol is external the linker will handle it.
923     // FIXME: Should we handle it as an optimization?
924 
925     // If the symbol is out of range, produce a relocation and hope the
926     // linker can handle it. GNU AS produces an error in this case.
927     if (Sym->isExternal())
928       return true;
929   }
930   // Create relocations for unconditional branches to function symbols with
931   // different execution mode in ELF binaries.
932   if (Sym && Sym->isELF()) {
933     unsigned Type = cast<MCSymbolELF>(Sym)->getType();
934     if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
935       if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
936         return true;
937       if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
938                                     FixupKind == ARM::fixup_arm_thumb_bl ||
939                                     FixupKind == ARM::fixup_t2_condbranch ||
940                                     FixupKind == ARM::fixup_t2_uncondbranch))
941         return true;
942     }
943   }
944   // We must always generate a relocation for BL/BLX instructions if we have
945   // a symbol to reference, as the linker relies on knowing the destination
946   // symbol's thumb-ness to get interworking right.
947   if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
948             FixupKind == ARM::fixup_arm_blx ||
949             FixupKind == ARM::fixup_arm_uncondbl ||
950             FixupKind == ARM::fixup_arm_condbl))
951     return true;
952   return false;
953 }
954 
955 /// getFixupKindNumBytes - The number of bytes the fixup may change.
956 static unsigned getFixupKindNumBytes(unsigned Kind) {
957   switch (Kind) {
958   default:
959     llvm_unreachable("Unknown fixup kind!");
960 
961   case FK_Data_1:
962   case ARM::fixup_arm_thumb_bcc:
963   case ARM::fixup_arm_thumb_cp:
964   case ARM::fixup_thumb_adr_pcrel_10:
965   case ARM::fixup_arm_thumb_upper_8_15:
966   case ARM::fixup_arm_thumb_upper_0_7:
967   case ARM::fixup_arm_thumb_lower_8_15:
968   case ARM::fixup_arm_thumb_lower_0_7:
969     return 1;
970 
971   case FK_Data_2:
972   case ARM::fixup_arm_thumb_br:
973   case ARM::fixup_arm_thumb_cb:
974   case ARM::fixup_arm_mod_imm:
975     return 2;
976 
977   case ARM::fixup_arm_pcrel_10_unscaled:
978   case ARM::fixup_arm_ldst_pcrel_12:
979   case ARM::fixup_arm_pcrel_10:
980   case ARM::fixup_arm_pcrel_9:
981   case ARM::fixup_arm_ldst_abs_12:
982   case ARM::fixup_arm_adr_pcrel_12:
983   case ARM::fixup_arm_uncondbl:
984   case ARM::fixup_arm_condbl:
985   case ARM::fixup_arm_blx:
986   case ARM::fixup_arm_condbranch:
987   case ARM::fixup_arm_uncondbranch:
988     return 3;
989 
990   case FK_Data_4:
991   case ARM::fixup_t2_ldst_pcrel_12:
992   case ARM::fixup_t2_condbranch:
993   case ARM::fixup_t2_uncondbranch:
994   case ARM::fixup_t2_pcrel_10:
995   case ARM::fixup_t2_pcrel_9:
996   case ARM::fixup_t2_adr_pcrel_12:
997   case ARM::fixup_arm_thumb_bl:
998   case ARM::fixup_arm_thumb_blx:
999   case ARM::fixup_arm_movt_hi16:
1000   case ARM::fixup_arm_movw_lo16:
1001   case ARM::fixup_t2_movt_hi16:
1002   case ARM::fixup_t2_movw_lo16:
1003   case ARM::fixup_t2_so_imm:
1004   case ARM::fixup_bf_branch:
1005   case ARM::fixup_bf_target:
1006   case ARM::fixup_bfl_target:
1007   case ARM::fixup_bfc_target:
1008   case ARM::fixup_bfcsel_else_target:
1009   case ARM::fixup_wls:
1010   case ARM::fixup_le:
1011     return 4;
1012 
1013   case FK_SecRel_2:
1014     return 2;
1015   case FK_SecRel_4:
1016     return 4;
1017   }
1018 }
1019 
1020 /// getFixupKindContainerSizeBytes - The number of bytes of the
1021 /// container involved in big endian.
1022 static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
1023   switch (Kind) {
1024   default:
1025     llvm_unreachable("Unknown fixup kind!");
1026 
1027   case FK_Data_1:
1028     return 1;
1029   case FK_Data_2:
1030     return 2;
1031   case FK_Data_4:
1032     return 4;
1033 
1034   case ARM::fixup_arm_thumb_bcc:
1035   case ARM::fixup_arm_thumb_cp:
1036   case ARM::fixup_thumb_adr_pcrel_10:
1037   case ARM::fixup_arm_thumb_br:
1038   case ARM::fixup_arm_thumb_cb:
1039   case ARM::fixup_arm_thumb_upper_8_15:
1040   case ARM::fixup_arm_thumb_upper_0_7:
1041   case ARM::fixup_arm_thumb_lower_8_15:
1042   case ARM::fixup_arm_thumb_lower_0_7:
1043     // Instruction size is 2 bytes.
1044     return 2;
1045 
1046   case ARM::fixup_arm_pcrel_10_unscaled:
1047   case ARM::fixup_arm_ldst_pcrel_12:
1048   case ARM::fixup_arm_pcrel_10:
1049   case ARM::fixup_arm_pcrel_9:
1050   case ARM::fixup_arm_adr_pcrel_12:
1051   case ARM::fixup_arm_uncondbl:
1052   case ARM::fixup_arm_condbl:
1053   case ARM::fixup_arm_blx:
1054   case ARM::fixup_arm_condbranch:
1055   case ARM::fixup_arm_uncondbranch:
1056   case ARM::fixup_t2_ldst_pcrel_12:
1057   case ARM::fixup_t2_condbranch:
1058   case ARM::fixup_t2_uncondbranch:
1059   case ARM::fixup_t2_pcrel_10:
1060   case ARM::fixup_t2_pcrel_9:
1061   case ARM::fixup_t2_adr_pcrel_12:
1062   case ARM::fixup_arm_thumb_bl:
1063   case ARM::fixup_arm_thumb_blx:
1064   case ARM::fixup_arm_movt_hi16:
1065   case ARM::fixup_arm_movw_lo16:
1066   case ARM::fixup_t2_movt_hi16:
1067   case ARM::fixup_t2_movw_lo16:
1068   case ARM::fixup_arm_mod_imm:
1069   case ARM::fixup_t2_so_imm:
1070   case ARM::fixup_bf_branch:
1071   case ARM::fixup_bf_target:
1072   case ARM::fixup_bfl_target:
1073   case ARM::fixup_bfc_target:
1074   case ARM::fixup_bfcsel_else_target:
1075   case ARM::fixup_wls:
1076   case ARM::fixup_le:
1077     // Instruction size is 4 bytes.
1078     return 4;
1079   }
1080 }
1081 
1082 void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
1083                                const MCValue &Target,
1084                                MutableArrayRef<char> Data, uint64_t Value,
1085                                bool IsResolved,
1086                                const MCSubtargetInfo* STI) const {
1087   unsigned Kind = Fixup.getKind();
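  // Fixup kinds from .reloc directives are emitted verbatim as relocations by
  // the object writer; there is nothing to patch into the instruction bytes.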
1088   if (Kind >= FirstLiteralRelocationKind)
1089     return;
1090   MCContext &Ctx = Asm.getContext();
1091   Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
1092   if (!Value)
1093     return; // Doesn't change encoding.
1094   const unsigned NumBytes = getFixupKindNumBytes(Kind);
1095 
1096   unsigned Offset = Fixup.getOffset();
1097   assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
1098 
1099   // Container size; used to index bytes from the most-significant end on big-endian targets.
1100   unsigned FullSizeBytes;
1101   if (Endian == llvm::endianness::big) {
1102     FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
1103     assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
1104     assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
1105   }
1106 
1107   // For each byte of the fragment that the fixup touches, mask in the bits from
1108   // the fixup value. The Value has been "split up" into the appropriate
1109   // bitfields above.
1110   for (unsigned i = 0; i != NumBytes; ++i) {
1111     unsigned Idx =
1112         Endian == llvm::endianness::little ? i : (FullSizeBytes - 1 - i);
1113     Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
1114   }
1115 }
1116 
1117 namespace CU {
1118 
1119 /// Compact unwind encoding values.
1120 enum CompactUnwindEncodings {
1121   UNWIND_ARM_MODE_MASK                         = 0x0F000000,
1122   UNWIND_ARM_MODE_FRAME                        = 0x01000000,
1123   UNWIND_ARM_MODE_FRAME_D                      = 0x02000000,
1124   UNWIND_ARM_MODE_DWARF                        = 0x04000000,
1125 
1126   UNWIND_ARM_FRAME_STACK_ADJUST_MASK           = 0x00C00000,
1127 
1128   UNWIND_ARM_FRAME_FIRST_PUSH_R4               = 0x00000001,
1129   UNWIND_ARM_FRAME_FIRST_PUSH_R5               = 0x00000002,
1130   UNWIND_ARM_FRAME_FIRST_PUSH_R6               = 0x00000004,
1131 
1132   UNWIND_ARM_FRAME_SECOND_PUSH_R8              = 0x00000008,
1133   UNWIND_ARM_FRAME_SECOND_PUSH_R9              = 0x00000010,
1134   UNWIND_ARM_FRAME_SECOND_PUSH_R10             = 0x00000020,
1135   UNWIND_ARM_FRAME_SECOND_PUSH_R11             = 0x00000040,
1136   UNWIND_ARM_FRAME_SECOND_PUSH_R12             = 0x00000080,
1137 
1138   UNWIND_ARM_FRAME_D_REG_COUNT_MASK            = 0x00000F00,
1139 
1140   UNWIND_ARM_DWARF_SECTION_OFFSET              = 0x00FFFFFF
1141 };
1142 
1143 } // end CU namespace
1144 
1145 /// Generate compact unwind encoding for the function based on the CFI
1146 /// instructions. If the CFI instructions describe a frame that cannot be
1147 /// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
1148 /// tells the runtime to fall back and unwind using DWARF.
1149 uint64_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
1150     const MCDwarfFrameInfo *FI, const MCContext *Ctxt) const {
1151   DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
1152   // Only armv7k uses CFI based unwinding.
1153   if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
1154     return 0;
1155   // No .cfi directives means no frame.
1156   ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
1157   if (Instrs.empty())
1158     return 0;
1159   if (!isDarwinCanonicalPersonality(FI->Personality) &&
1160       !Ctxt->emitCompactUnwindNonCanonical())
1161     return CU::UNWIND_ARM_MODE_DWARF;
1162 
1163   // Start off assuming CFA is at SP+0.
1164   unsigned CFARegister = ARM::SP;
1165   int CFARegisterOffset = 0;
1166   // Mark savable registers as initially unsaved
1167   DenseMap<unsigned, int> RegOffsets;
1168   int FloatRegCount = 0;
1169   // Process each .cfi directive and build up compact unwind info.
1170   for (const MCCFIInstruction &Inst : Instrs) {
1171     unsigned Reg;
1172     switch (Inst.getOperation()) {
1173     case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
1174       CFARegisterOffset = Inst.getOffset();
1175       CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1176       break;
1177     case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
1178       CFARegisterOffset = Inst.getOffset();
1179       break;
1180     case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
1181       CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1182       break;
1183     case MCCFIInstruction::OpOffset: // DW_CFA_offset
1184       Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1185       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
1186         RegOffsets[Reg] = Inst.getOffset();
1187       else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
1188         RegOffsets[Reg] = Inst.getOffset();
1189         ++FloatRegCount;
1190       } else {
1191         DEBUG_WITH_TYPE("compact-unwind",
1192                         llvm::dbgs() << ".cfi_offset on unknown register="
1193                                      << Inst.getRegister() << "\n");
1194         return CU::UNWIND_ARM_MODE_DWARF;
1195       }
1196       break;
1197     case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
1198       // Ignore
1199       break;
1200     default:
1201       // Directive not convertible to compact unwind, bail out.
1202       DEBUG_WITH_TYPE("compact-unwind",
1203                       llvm::dbgs()
1204                           << "CFI directive not compatible with compact "
1205                              "unwind encoding, opcode="
1206                           << uint8_t(Inst.getOperation()) << "\n");
1207       return CU::UNWIND_ARM_MODE_DWARF;
1208       break;
1209     }
1210   }
1211 
1212   // If no frame set up, return no unwind info.
1213   if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
1214     return 0;
1215 
1216   // Verify standard frame (lr/r7) was used.
1217   if (CFARegister != ARM::R7) {
1218     DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
1219                                                    << CFARegister
1220                                                    << " instead of r7\n");
1221     return CU::UNWIND_ARM_MODE_DWARF;
1222   }
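  // With the standard prologue (push {r7, lr}; mov r7, sp) the CFA is r7 + 8;
  // anything beyond that is treated as an extra stack adjustment (typically
  // from var-args handling, see the switch on StackAdjust below).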
1223   int StackAdjust = CFARegisterOffset - 8;
1224   if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
1225     DEBUG_WITH_TYPE("compact-unwind",
1226                     llvm::dbgs()
1227                         << "LR not saved as standard frame, StackAdjust="
1228                         << StackAdjust
1229                         << ", CFARegisterOffset=" << CFARegisterOffset
1230                         << ", lr save at offset=" << RegOffsets[14] << "\n");
1231     return CU::UNWIND_ARM_MODE_DWARF;
1232   }
1233   if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
1234     DEBUG_WITH_TYPE("compact-unwind",
1235                     llvm::dbgs() << "r7 not saved as standard frame\n");
1236     return CU::UNWIND_ARM_MODE_DWARF;
1237   }
1238   uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
1239 
1240   // If var-args are used, there may be a stack adjust required.
1241   switch (StackAdjust) {
1242   case 0:
1243     break;
1244   case 4:
1245     CompactUnwindEncoding |= 0x00400000;
1246     break;
1247   case 8:
1248     CompactUnwindEncoding |= 0x00800000;
1249     break;
1250   case 12:
1251     CompactUnwindEncoding |= 0x00C00000;
1252     break;
1253   default:
1254     DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
1255                                           << ".cfi_def_cfa stack adjust ("
1256                                           << StackAdjust << ") out of range\n");
1257     return CU::UNWIND_ARM_MODE_DWARF;
1258   }
1259 
1260   // If r6 is saved, it must be right below r7.
1261   static struct {
1262     unsigned Reg;
1263     unsigned Encoding;
1264   } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
1265                    {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
1266                    {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
1267                    {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
1268                    {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
1269                    {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
1270                    {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
1271                    {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
1272 
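  // Walk the expected push order (r6..r4, then r12..r8): CurOffset tracks the
  // next save slot below the r7/lr pair, and any listed register saved at a
  // different offset forces a fall back to DWARF.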
1273   int CurOffset = -8 - StackAdjust;
1274   for (auto CSReg : GPRCSRegs) {
1275     auto Offset = RegOffsets.find(CSReg.Reg);
1276     if (Offset == RegOffsets.end())
1277       continue;
1278 
1279     int RegOffset = Offset->second;
1280     if (RegOffset != CurOffset - 4) {
1281       DEBUG_WITH_TYPE("compact-unwind",
1282                       llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
1283                                    << RegOffset << " but only supported at "
1284                                    << CurOffset << "\n");
1285       return CU::UNWIND_ARM_MODE_DWARF;
1286     }
1287     CompactUnwindEncoding |= CSReg.Encoding;
1288     CurOffset -= 4;
1289   }
1290 
1291   // If no floats saved, we are done.
1292   if (FloatRegCount == 0)
1293     return CompactUnwindEncoding;
1294 
1295   // Switch mode to include D register saving.
1296   CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
1297   CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
1298 
1299   // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
1300   // but needs coordination with the linker and libunwind.
1301   if (FloatRegCount > 4) {
1302     DEBUG_WITH_TYPE("compact-unwind",
1303                     llvm::dbgs() << "unsupported number of D registers saved ("
1304                                  << FloatRegCount << ")\n");
1305     return CU::UNWIND_ARM_MODE_DWARF;
1306   }
1307 
1308   // Floating point registers must either be saved sequentially, or we defer to
1309   // DWARF. No gaps allowed here so check that each saved d-register is
1310   // precisely where it should be.
1311   static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
1312   for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
1313     auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
1314     if (Offset == RegOffsets.end()) {
1315       DEBUG_WITH_TYPE("compact-unwind",
1316                       llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1317                                    << MRI.getName(FPRCSRegs[Idx])
1318                                    << " not saved\n");
1319       return CU::UNWIND_ARM_MODE_DWARF;
1320     } else if (Offset->second != CurOffset - 8) {
1321       DEBUG_WITH_TYPE("compact-unwind",
1322                       llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1323                                    << MRI.getName(FPRCSRegs[Idx])
1324                                    << " saved at " << Offset->second
1325                                    << ", expected at " << CurOffset - 8
1326                                    << "\n");
1327       return CU::UNWIND_ARM_MODE_DWARF;
1328     }
1329     CurOffset -= 8;
1330   }
1331 
1332   return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
1333 }
1334 
1335 static MCAsmBackend *createARMAsmBackend(const Target &T,
1336                                          const MCSubtargetInfo &STI,
1337                                          const MCRegisterInfo &MRI,
1338                                          const MCTargetOptions &Options,
1339                                          llvm::endianness Endian) {
1340   const Triple &TheTriple = STI.getTargetTriple();
1341   switch (TheTriple.getObjectFormat()) {
1342   default:
1343     llvm_unreachable("unsupported object format");
1344   case Triple::MachO:
1345     return new ARMAsmBackendDarwin(T, STI, MRI);
1346   case Triple::COFF:
1347     assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
1348     return new ARMAsmBackendWinCOFF(T, STI.getTargetTriple().isThumb());
1349   case Triple::ELF:
1350     assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
1351     uint8_t OSABI = Options.FDPIC
1352                         ? ELF::ELFOSABI_ARM_FDPIC
1353                         : MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1354     return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI,
1355                                 Endian);
1356   }
1357 }
1358 
1359 MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
1360                                           const MCSubtargetInfo &STI,
1361                                           const MCRegisterInfo &MRI,
1362                                           const MCTargetOptions &Options) {
1363   return createARMAsmBackend(T, STI, MRI, Options, llvm::endianness::little);
1364 }
1365 
1366 MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
1367                                           const MCSubtargetInfo &STI,
1368                                           const MCRegisterInfo &MRI,
1369                                           const MCTargetOptions &Options) {
1370   return createARMAsmBackend(T, STI, MRI, Options, llvm::endianness::big);
1371 }
1372