//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? support::little : support::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits)     Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_tlsdesc_call", 0, 0, 0}};
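    // The Offset column above is the left shift applyFixup() applies before
    // OR'ing the value into the instruction word. For example,
    // fixup_aarch64_add_imm12 (Offset 10, Size 12) lands in bits [21:10] of
    // the 32-bit instruction, i.e. the imm12 field of ADD (immediate).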

    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  void HandleAssemblerFlag(MCAssemblerFlag Flag) {}

  unsigned getPointerSize() const { return 8; }
  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case AArch64::fixup_aarch64_tlsdesc_call:
    return 0;

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}
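
// The 3-byte cases above are instruction fixups whose shifted value fits
// entirely in the low three bytes of the instruction word; for example,
// fixup_aarch64_ldr_pcrel_imm19 occupies bits [23:5], so byte 3 is untouched.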

static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}
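// ADR/ADRP split their 21-bit immediate into immlo (bits 30:29) and immhi
// (bits 23:5). For example, Value = 0x1234 gives lo2 = 0b00 and hi19 = 0x48d,
// so AdrImmBits returns 0x48d << 5 = 0x91a0.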

static bool valueFitsIntoFixupKind(unsigned Kind, uint64_t Value) {
  unsigned NumBits;
  switch(Kind) {
  case FK_Data_1: NumBits = 8; break;
  case FK_Data_2: NumBits = 16; break;
  case FK_Data_4: NumBits = 32; break;
  case FK_Data_8: NumBits = 64; break;
  default: return true;
  }
  return isUIntN(NumBits, Value) ||
    isIntN(NumBits, static_cast<int64_t>(Value));
}
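// Both the unsigned and the signed interpretation are accepted; e.g. for
// FK_Data_1, 0xff and -128 both fit, while 0x100 and -129 do not.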

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF())
      return AdrImmBits(Value & 0x1fffffULL);
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 21-bit immediate
    if (SignedValue > 2097151 || SignedValue < -2097152)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    }
    else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    }
    else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    if (!valueFitsIntoFixupKind(Fixup.getTargetKind(), Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value too large for data type!");
    LLVM_FALLTHROUGH;
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}
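
// Worked example: for fixup_aarch64_ldst_imm12_scale8 with Value = 0x40, the
// alignment and range checks pass and adjustFixupValue returns 0x8, which
// applyFixup then shifts into the imm12 field (bits [21:10]); the load/store
// re-scales it by 8 at decode time.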

Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return None;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y)  .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Default(-1u);
  if (Type == -1u)
    return None;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}
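
// This is what lets assembly such as ".reloc ., R_AARCH64_NONE, 0" name an
// ELF relocation type directly: the name is looked up in AArch64.def and
// mapped past FirstLiteralRelocationKind, so it bypasses normal fixup
// processing (see the early returns in getFixupKindInfo and applyFixup).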

/// getFixupKindContainerSizeInBytes - The number of bytes of the
/// container involved in big-endian fixups, or 0 if the item is little-endian.
unsigned AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == support::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_tlsdesc_call:
  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}
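
// On a big-endian target (e.g. aarch64_be-unknown-linux-gnu), FK_Data_4 thus
// reports a 4-byte container and applyFixup() writes the fixup bytes in
// reverse order, while instruction fixups still report 0 because AArch64
// instructions are little-endian regardless of data endianness.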

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Size of the big-endian container, or 0 if the data is little-endian.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FullSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }
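
  // For example, a 3-byte little-endian fixup with Value = 0x00abcdef OR's
  // 0xef into Data[Offset], 0xcd into Data[Offset+1], and 0xab into
  // Data[Offset+2], leaving byte 3 of the instruction untouched.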

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                          const MCSubtargetInfo &STI) const {
  return false;
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    support::endian::write<uint32_t>(OS, 0xd503201f, Endian);
  return true;
}
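
// For example, Count = 10 emits two zero bytes followed by two NOP
// instructions (0xd503201f is the AArch64 NOP encoding): 10 % 4 = 2 padding
// bytes, then 10 / 4 = 2 full words.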

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
  // LDR GOT relocations need a relocation
  if (Kind == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
      SymLoc == AArch64MCExpr::VK_GOT)
    return true;
  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF info
  /// for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};
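
// The mode bits and pair bits combine by OR; for example, a frame function
// that saves X19/X20 and D8/D9 encodes as
// UNWIND_ARM64_MODE_FRAME | UNWIND_ARM64_FRAME_X19_X20_PAIR |
// UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x04000101.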

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }
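
  // For example, a frameless function with a 64-byte stack yields
  // (64 / 16) << 12 = 0x4000, which lands in the frameless stack-size
  // field of the compact unwind encoding.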

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));
        // CFA registers other than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        assert(XReg == AArch64::FP && "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
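
  // For example, a frameless leaf with ".cfi_def_cfa_offset 32" and no saved
  // registers produces UNWIND_ARM64_MODE_FRAMELESS |
  // encodeStackAdjustment(32) = 0x02000000 | 0x2000 = 0x02002000.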
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

}

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter();
  }
};
}

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}
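
// The target registry dispatches on object format and endianness; e.g. the
// triple arm64-apple-darwin selects DarwinAArch64AsmBackend above, while
// aarch64_be-unknown-linux-gnu goes through createAArch64beAsmBackend below.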

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = Options.getABIName() == "ilp32";
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}
801