//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCAsmInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/Triple.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                              Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, 0},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, 0},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, 0},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch9", 5, 9, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, 0},
        {"fixup_aarch64_pcrel_branch16", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch19", 5, 19, 0},
        {"fixup_aarch64_pcrel_branch26", 0, 26, 0},
        {"fixup_aarch64_pcrel_call26", 0, 26, 0}};

    // Fixup kinds from raw relocation types and .reloc directives force
    // relocations and do not need these fields.
    if (mc::isRelocation(Kind))
      return {};

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) <
               AArch64::NumTargetFixupKinds &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
                  MutableArrayRef<char> Data, uint64_t Value,
                  bool IsResolved) override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

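/// Encode a 21-bit ADR/ADRP immediate: the low two bits go into the immlo
/// field (instruction bits 30:29) and the remaining 19 bits into the immhi
/// field (instruction bits 23:5).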
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

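/// Check that \p Value is in range for \p Fixup, reporting any errors via
/// \p Ctx, and return it encoded as the instruction field expects (scaled and
/// masked as needed).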
static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64::Specifier RefKind =
        static_cast<AArch64::Specifier>(Target.getSpecifier());
    if (AArch64::getSymbolLoc(RefKind) != AArch64::S_ABS &&
        AArch64::getSymbolLoc(RefKind) != AArch64::S_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
      switch (AArch64::getAddressFrag(RefKind)) {
      case AArch64::S_G0:
        break;
      case AArch64::S_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64::S_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64::S_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64::getAddressFrag(RefKind)) {
      case AArch64::S_G0:
        break;
      case AArch64::S_G1:
        Value = Value >> 16;
        break;
      case AArch64::S_G2:
        Value = Value >> 32;
        break;
      case AArch64::S_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64::S_NC) {
      Value &= 0xFFFF;
    } else if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch9:
    // Signed 11-bit (9 bits plus a 2-bit shift) label
    if (!isInt<11>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x1ff;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch16:
    // Unsigned PC-relative offset, so invert the negative immediate.
    SignedValue = -SignedValue;
    Value = static_cast<uint64_t>(SignedValue);
    // Check valid 18-bit unsigned range.
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0xffff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset.
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit immediate
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

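/// Map a fixup name used by the .reloc directive (an ELF relocation type name
/// or a BFD_RELOC_* alias) to a literal relocation fixup kind; returns
/// std::nullopt for non-ELF targets or unknown names.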
std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainereSizeInBytes - The number of bytes of the container
/// involved when the target is big-endian, or 0 if the item is little-endian.
unsigned
AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
  if (Endian == llvm::endianness::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little-endian.
    return 0;
  }
}

static bool shouldForceRelocation(const MCFixup &Fixup) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  return Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21;
}

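/// Let maybeAddReloc record a relocation if one is required, then patch the
/// instruction or data bytes in \p Data with the (possibly adjusted) fixup
/// value.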
void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved) {
  if (shouldForceRelocation(Fixup))
    IsResolved = false;
  maybeAddReloc(F, Fixup, Target, Value, IsResolved);
  MCFixupKind Kind = Fixup.getKind();
  if (mc::isRelocation(Kind))
    return;

  if (Fixup.getKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64::Specifier>(Target.getSpecifier());
    AArch64::Specifier SymLoc = AArch64::getSymbolLoc(RefKind);
    if (SymLoc == AArch64::S_AUTH || SymLoc == AArch64::S_AUTHADDR) {
      const auto *Expr = dyn_cast<AArch64AuthMCExpr>(Fixup.getValue());
      if (!Expr) {
        getContext().reportError(Fixup.getValue()->getLoc(),
                                 "expected relocatable expression");
        return;
      }
      assert(Value == 0);
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big-endian bytes.
  unsigned FulleSizeInBytes =
      getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian.
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian.
    assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64::Specifier RefKind =
      static_cast<AArch64::Specifier>(Target.getSpecifier());
  if (AArch64::getSymbolLoc(RefKind) == AArch64::S_SABS ||
      (!RefKind && Fixup.getKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
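  // Each NOP is the 32-bit instruction 0xd503201f, emitted here in
  // little-endian byte order (instructions are always little-endian).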
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint64_t
  generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                const MCContext *Ctxt) const override {
    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    uint64_t StackSize = 0;

    uint64_t CompactUnwindEncoding = 0;
    int64_t CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        MCRegister XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fallback on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        MCRegister LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        MCRegister FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        MCRegister Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        MCRegister Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

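/// ELF flavour of the backend; records the OS ABI byte and whether the ILP32
/// ABI is in use so the matching ELF object writer can be created.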
class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {
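/// COFF (Windows) flavour of the backend; AArch64 COFF is always
/// little-endian.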
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};

} // end anonymous namespace

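/// Create the little-endian AArch64 asm backend matching the target's object
/// file format (Mach-O, COFF, or ELF).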
MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

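/// Create the big-endian AArch64 asm backend; only ELF targets support big
/// endian.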
MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}
