//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/Triple.h"
using namespace llvm;

namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;

protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                              Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch16", 5, 16, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};
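    // For example (an illustrative reading of the table above, not an extra
    // code path): fixup_aarch64_add_imm12 has Offset 10 and Size 12, so
    // applyFixup() below shifts the adjusted value left by 10 and ORs it into
    // bits [21:10] of the 32-bit instruction word.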
    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainerSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target,
                             const MCSubtargetInfo *STI) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch16:
    // Unsigned PC-relative offset, so invert the negative immediate.
    SignedValue = -SignedValue;
    Value = static_cast<uint64_t>(SignedValue);
    // Check valid 18-bit unsigned range.
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0xffff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit immediate
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
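    // (For reference: a signed 28-bit byte offset gives B/BL a reach of
    // +/-128 MiB; the low two bits are always zero, so only 26 bits are
    // stored in the instruction.)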
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainerSizeInBytes - The number of bytes of the container
/// involved in a big-endian fixup, or 0 if the item is little-endian.
unsigned
AArch64AsmBackend::getFixupKindContainerSizeInBytes(unsigned Kind) const {
  if (Endian == llvm::endianness::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (Fixup.getTargetKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
    if (SymLoc == AArch64AuthMCExpr::VK_AUTH ||
        SymLoc == AArch64AuthMCExpr::VK_AUTHADDR) {
      assert(Value == 0);
      const auto *Expr = cast<AArch64AuthMCExpr>(Fixup.getValue());
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
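  // (SignedValue is captured before the adjustment: the MOVZ/MOVN selection
  // at the bottom of this function keys off the sign of the original fixup
  // value, not the adjusted encoding.)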
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeInBytes = getFixupKindContainerSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FullSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FullSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FullSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value,
                                             const MCRelaxableFragment *DF,
                                             const MCAsmLayout &Layout) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.
  //
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target,
                                              const MCSubtargetInfo *STI) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset.
  // At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
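  ///
  /// For instance, a canonical prologue of the form
  ///     stp x29, x30, [sp, #-16]!
  ///     mov x29, sp
  /// described by matching .cfi_def_cfa/.cfi_offset directives yields
  /// UNWIND_ARM64_MODE_FRAME, OR'd with the CU::UNWIND_ARM64_FRAME_* bits for
  /// any saved register pairs. (Illustrative summary of the logic below.)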
  uint32_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    int CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        unsigned XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // CFA registers other than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        unsigned LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        //   X19/X20 pair = 0x00000001,
        //   X21/X22 pair = 0x00000002,
        //   X23/X24 pair = 0x00000004,
        //   X25/X26 pair = 0x00000008,
        //   X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          //   D8/D9 pair = 0x00000100,
          //   D10/D11 pair = 0x00000200,
          //   D12/D13 pair = 0x00000400,
          //   D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
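      // (65520 = 4095 * 16: encodeStackAdjustment() above stores the size in
      // 16-byte units, and the frameless encoding's 12-bit
      // UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK field holds at most 4095.)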
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {

class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}