//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/Triple.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;
protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? support::little : support::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits)     Flags
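        //
        // "Offset" is the bit position at which the (already adjusted) value
        // is inserted. Fixups whose bits are scattered across the instruction
        // word (e.g. the ADR/ADRP immediates) are pre-packed into position by
        // adjustFixupValue below and therefore use offset 0 with size 32.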
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};

    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
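/// Note this is the number of bytes touched when the (shifted) value is OR'd
/// into the instruction, not the width of the field itself: e.g. a 19-bit
/// field at bit offset 5 occupies bits [23:5] and therefore fits within the
/// low three bytes.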
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

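// ADR and ADRP encode a 21-bit immediate split across the instruction word:
// the low two bits land in bits [30:29] (immlo) and the high 19 bits in bits
// [23:5] (immhi). AdrImmBits packs a contiguous 21-bit value into that
// layout; e.g. Value = 0b101 yields immlo = 0b01 and immhi = 0b1, i.e.
// (1 << 5) | (1 << 29).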
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
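    // On COFF, an unresolved ADD/LDST fixup becomes an
    // IMAGE_REL_ARM64_PAGEOFFSET_12A/12L relocation, which carries only the
    // low 12 bits of the address, so keep just that slice here.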
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (Value >= 0x1000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
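    // e.g. a byte offset of 0x1ffe encodes as 0xfff; odd offsets cannot be
    // represented and are rejected below.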
    if (Value >= 0x2000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (Value >= 0x4000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (Value >= 0x8000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (Value >= 0x10000)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
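      // Each :abs_gN_s: fragment selects one 16-bit slice of the signed
      // value; e.g. with SignedValue == 0x12345678, VK_G1 leaves 0x1234 to
      // be range-checked and MOVN/MOVZ-adjusted below.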
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (SignedValue > 32767 || SignedValue < -32768)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Signed 28-bit immediate
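    // (a branch range of +/-128 MiB; 2^27 == 134217728)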
    if (SignedValue > 134217727 || SignedValue < -134217728)
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

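  // Accept the raw ELF relocation names (R_AARCH64_*) used by .reloc
  // directives, plus the BFD_RELOC_* aliases accepted by GNU as; e.g.
  //   .reloc ., BFD_RELOC_32, sym
  // maps to R_AARCH64_ABS32.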
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y)  .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainerSizeInBytes - The number of bytes of the container
/// involved in a big-endian fixup, or 0 if the item is little-endian.
unsigned
AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == support::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
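  // (e.g. fixup_aarch64_add_imm12 has TargetOffset 10, so the adjusted
  // 12-bit value lands in instruction bits [21:10])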
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FullSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
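    // In little-endian byte order, bit 30 of the instruction is bit 6 of
    // byte 3, hence the Data[Offset + 3] manipulation below.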
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
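  // 0xd503201f is the A64 NOP (HINT #0) encoding, emitted in little-endian
  // byte order below.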
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
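    // e.g. a 64-byte frame encodes as (64 / 16) << 12 == 0x4000.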
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint32_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    int CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.
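        // The mask checks below enforce that order; e.g.
        // (CompactUnwindEncoding & 0xF1E) == 0 requires that no pair that
        // must follow X19/X20 has been recorded yet.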

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
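      // (the encoding's 12-bit stack-size field scaled by 16:
      // 0xFFF * 16 == 65520)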
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};
} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}