//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a printer that converts from our internal representation
// of machine-dependent LLVM code to the AArch64 assembly language.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64MCInstLower.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <memory>

using namespace llvm;

#define DEBUG_TYPE "asm-printer"

namespace {

class AArch64AsmPrinter : public AsmPrinter {
  AArch64MCInstLower MCInstLowering;
  StackMaps SM;
  FaultMaps FM;
  const AArch64Subtarget *STI;

public:
  AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
        SM(*this), FM(*this) {}

  StringRef getPassName() const override { return "AArch64 Assembly Printer"; }

  /// Wrapper for MCInstLowering.lowerOperand() for the
  /// tblgen'erated pseudo lowering.
  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
    return MCInstLowering.lowerOperand(MO, MCOp);
  }

  void emitStartOfAsmFile(Module &M) override;
  void emitJumpTableInfo() override;

  void emitFunctionEntryLabel() override;

  void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);

  void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                     const MachineInstr &MI);
  void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                       const MachineInstr &MI);
  void LowerFAULTING_OP(const MachineInstr &MI);

  void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);

  typedef std::tuple<unsigned, bool, uint32_t> HwasanMemaccessTuple;
  std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void EmitHwasanMemaccessSymbols(Module &M);

  void EmitSled(const MachineInstr &MI, SledKind Kind);

  /// tblgen'erated driver function for lowering simple MI->MC
  /// pseudo instructions.
  bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
                                   const MachineInstr *MI);

  void emitInstruction(const MachineInstr *MI) override;

  void emitFunctionHeaderComment() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AsmPrinter::getAnalysisUsage(AU);
    AU.setPreservesAll();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    AArch64FI = MF.getInfo<AArch64FunctionInfo>();
    STI = static_cast<const AArch64Subtarget *>(&MF.getSubtarget());

    SetupMachineFunction(MF);

    if (STI->isTargetCOFF()) {
      bool Internal = MF.getFunction().hasInternalLinkage();
      COFF::SymbolStorageClass Scl = Internal ? COFF::IMAGE_SYM_CLASS_STATIC
                                              : COFF::IMAGE_SYM_CLASS_EXTERNAL;
      int Type =
          COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;

      OutStreamer->BeginCOFFSymbolDef(CurrentFnSym);
      OutStreamer->EmitCOFFSymbolStorageClass(Scl);
      OutStreamer->EmitCOFFSymbolType(Type);
      OutStreamer->EndCOFFSymbolDef();
    }

    // Emit the rest of the function body.
    emitFunctionBody();

    // Emit the XRay table for this function.
    emitXRayTable();

    // We didn't modify anything.
    return false;
  }

private:
  void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
  bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
  bool printAsmRegInClass(const MachineOperand &MO,
                          const TargetRegisterClass *RC, unsigned AltName,
                          raw_ostream &O);

  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                       const char *ExtraCode, raw_ostream &O) override;
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             const char *ExtraCode, raw_ostream &O) override;

  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);

  void emitFunctionBodyEnd() override;

  MCSymbol *GetCPISymbol(unsigned CPID) const override;
  void emitEndOfAsmFile(Module &M) override;

  AArch64FunctionInfo *AArch64FI = nullptr;

  /// Emit the LOHs contained in AArch64FI.
  void EmitLOHs();

  /// Emit instruction to set float register to zero.
  void EmitFMov0(const MachineInstr &MI);

  using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;

  MInstToMCSymbol LOHInstToLabel;
};

} // end anonymous namespace

void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
  if (!TM.getTargetTriple().isOSBinFormatELF())
    return;

  // Assemble feature flags that may require creation of a note section.
  unsigned Flags = 0;
  if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("branch-target-enforcement")))
    if (BTE->getZExtValue())
      Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;

  if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
          M.getModuleFlag("sign-return-address")))
    if (Sign->getZExtValue())
      Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;

  if (Flags == 0)
    return;

  // Emit a .note.gnu.property section with the flags.
  if (auto *TS = static_cast<AArch64TargetStreamer *>(
          OutStreamer->getTargetStreamer()))
    TS->emitNoteSection(Flags);
}

void AArch64AsmPrinter::emitFunctionHeaderComment() {
  const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
  Optional<std::string> OutlinerString = FI->getOutliningStyle();
  if (OutlinerString != None)
    OutStreamer->GetCommentOS() << ' ' << OutlinerString;
}

void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI) {
  const Function &F = MF->getFunction();
  if (F.hasFnAttribute("patchable-function-entry")) {
    unsigned Num;
    if (F.getFnAttribute("patchable-function-entry")
            .getValueAsString()
            .getAsInteger(10, Num))
      return;
    emitNops(Num);
    return;
  }

  EmitSled(MI, SledKind::FUNCTION_ENTER);
}

void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
  EmitSled(MI, SledKind::FUNCTION_EXIT);
}

void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
  EmitSled(MI, SledKind::TAIL_CALL);
}

void AArch64AsmPrinter::EmitSled(const MachineInstr &MI, SledKind Kind) {
  static const int8_t NoopsInSledCount = 7;
  // We want to emit the following pattern:
  //
  // .Lxray_sled_N:
  //   ALIGN
  //   B #32
  //   ; 7 NOP instructions (28 bytes)
  // .tmpN
  //
  // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
  // over the full 32 bytes (8 instructions) with the following pattern:
  //
  //   STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
  //   LDR W0, #12 ; W0 := function ID
  //   LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
  //   BLR X16 ; call the tracing trampoline
  //   ;DATA: 32 bits of function ID
  //   ;DATA: lower 32 bits of the address of the trampoline
  //   ;DATA: higher 32 bits of the address of the trampoline
  //   LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
  //
  OutStreamer->emitCodeAlignment(4);
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->emitLabel(CurSled);
  auto Target = OutContext.createTempSymbol();

  // Emit "B #32" instruction, which jumps over the next 28 bytes.
  // The operand has to be the number of 4-byte instructions to jump over,
  // including the current instruction.
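  // 32 bytes at 4 bytes per instruction gives the operand of 8 used below.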
  EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));

  for (int8_t I = 0; I < NoopsInSledCount; I++)
    EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));

  OutStreamer->emitLabel(Target);
  recordSled(CurSled, MI, Kind, 2);
}

void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
  Register Reg = MI.getOperand(0).getReg();
  bool IsShort =
      MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES;
  uint32_t AccessInfo = MI.getOperand(1).getImm();
  MCSymbol *&Sym =
      HwasanMemaccessSymbols[HwasanMemaccessTuple(Reg, IsShort, AccessInfo)];
  if (!Sym) {
    // FIXME: Make this work on non-ELF.
    if (!TM.getTargetTriple().isOSBinFormatELF())
      report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");

    std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
                          utostr(AccessInfo);
    if (IsShort)
      SymName += "_short_v2";
    Sym = OutContext.getOrCreateSymbol(SymName);
  }

  EmitToStreamer(*OutStreamer,
                 MCInstBuilder(AArch64::BL)
                     .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
}

void AArch64AsmPrinter::EmitHwasanMemaccessSymbols(Module &M) {
  if (HwasanMemaccessSymbols.empty())
    return;

  const Triple &TT = TM.getTargetTriple();
  assert(TT.isOSBinFormatELF());
  std::unique_ptr<MCSubtargetInfo> STI(
      TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
  assert(STI && "Unable to create subtarget info");

  MCSymbol *HwasanTagMismatchV1Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
  MCSymbol *HwasanTagMismatchV2Sym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");

  const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
  const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
      MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);

  for (auto &P : HwasanMemaccessSymbols) {
    unsigned Reg = std::get<0>(P.first);
    bool IsShort = std::get<1>(P.first);
    uint32_t AccessInfo = std::get<2>(P.first);
    const MCSymbolRefExpr *HwasanTagMismatchRef =
        IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
    MCSymbol *Sym = P.second;

    bool HasMatchAllTag =
        (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
    uint8_t MatchAllTag =
        (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
    unsigned Size =
        1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
    bool CompileKernel =
        (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;

    OutStreamer->SwitchSection(OutContext.getELFSection(
        ".text.hot", ELF::SHT_PROGBITS,
        ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
        Sym->getName()));

    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
    OutStreamer->emitLabel(Sym);

    OutStreamer->emitInstruction(MCInstBuilder(AArch64::SBFMXri)
                                     .addReg(AArch64::X16)
                                     .addReg(Reg)
                                     .addImm(4)
                                     .addImm(55),
                                 *STI);
    OutStreamer->emitInstruction(
        MCInstBuilder(AArch64::LDRBBroX)
            .addReg(AArch64::W16)
            .addReg(IsShort ? AArch64::X20 : AArch64::X9)
            .addReg(AArch64::X16)
            .addImm(0)
            .addImm(0),
        *STI);
    OutStreamer->emitInstruction(
        MCInstBuilder(AArch64::SUBSXrs)
            .addReg(AArch64::XZR)
            .addReg(AArch64::X16)
            .addReg(Reg)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
        *STI);
    MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
    OutStreamer->emitInstruction(
        MCInstBuilder(AArch64::Bcc)
            .addImm(AArch64CC::NE)
            .addExpr(MCSymbolRefExpr::create(HandleMismatchOrPartialSym,
                                             OutContext)),
        *STI);
    MCSymbol *ReturnSym = OutContext.createTempSymbol();
    OutStreamer->emitLabel(ReturnSym);
    OutStreamer->emitInstruction(
        MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
    OutStreamer->emitLabel(HandleMismatchOrPartialSym);

    if (HasMatchAllTag) {
      OutStreamer->emitInstruction(MCInstBuilder(AArch64::UBFMXri)
                                       .addReg(AArch64::X16)
                                       .addReg(Reg)
                                       .addImm(56)
                                       .addImm(63),
                                   *STI);
      OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSXri)
                                       .addReg(AArch64::XZR)
                                       .addReg(AArch64::X16)
                                       .addImm(MatchAllTag)
                                       .addImm(0),
                                   *STI);
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
          *STI);
    }

    if (IsShort) {
      OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWri)
                                       .addReg(AArch64::WZR)
                                       .addReg(AArch64::W16)
                                       .addImm(15)
                                       .addImm(0),
                                   *STI);
      MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::HI)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
          *STI);

      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::ANDXri)
              .addReg(AArch64::X17)
              .addReg(Reg)
              .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
          *STI);
      if (Size != 1)
        OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
                                         .addReg(AArch64::X17)
                                         .addReg(AArch64::X17)
                                         .addImm(Size - 1)
                                         .addImm(0),
                                     *STI);
      OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWrs)
                                       .addReg(AArch64::WZR)
                                       .addReg(AArch64::W16)
                                       .addReg(AArch64::W17)
                                       .addImm(0),
                                   *STI);
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::LS)
              .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
          *STI);

      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::ORRXri)
              .addReg(AArch64::X16)
              .addReg(Reg)
              .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
          *STI);
      OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRBBui)
                                       .addReg(AArch64::W16)
                                       .addReg(AArch64::X16)
                                       .addImm(0),
                                   *STI);
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::SUBSXrs)
              .addReg(AArch64::XZR)
              .addReg(AArch64::X16)
              .addReg(Reg)
              .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
          *STI);
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::Bcc)
              .addImm(AArch64CC::EQ)
              .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
          *STI);

      OutStreamer->emitLabel(HandleMismatchSym);
    }

    OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
                                     .addReg(AArch64::SP)
                                     .addReg(AArch64::X0)
                                     .addReg(AArch64::X1)
                                     .addReg(AArch64::SP)
                                     .addImm(-32),
                                 *STI);
    OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXi)
                                     .addReg(AArch64::FP)
                                     .addReg(AArch64::LR)
                                     .addReg(AArch64::SP)
                                     .addImm(29),
                                 *STI);

    if (Reg != AArch64::X0)
      OutStreamer->emitInstruction(MCInstBuilder(AArch64::ORRXrs)
                                       .addReg(AArch64::X0)
                                       .addReg(AArch64::XZR)
                                       .addReg(Reg)
                                       .addImm(0),
                                   *STI);
    OutStreamer->emitInstruction(
        MCInstBuilder(AArch64::MOVZXi)
            .addReg(AArch64::X1)
            .addImm(AccessInfo & HWASanAccessInfo::RuntimeMask)
            .addImm(0),
        *STI);

    if (CompileKernel) {
      // The Linux kernel's dynamic loader doesn't support GOT relative
      // relocations, but it doesn't support late binding either, so just call
      // the function directly.
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef), *STI);
    } else {
      // Intentionally load the GOT entry and branch to it, rather than
      // possibly late binding the function, which may clobber the registers
      // before we have a chance to save them.
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::ADRP)
              .addReg(AArch64::X16)
              .addExpr(AArch64MCExpr::create(
                  HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
                  OutContext)),
          *STI);
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::LDRXui)
              .addReg(AArch64::X16)
              .addReg(AArch64::X16)
              .addExpr(AArch64MCExpr::create(
                  HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
                  OutContext)),
          *STI);
      OutStreamer->emitInstruction(
          MCInstBuilder(AArch64::BR).addReg(AArch64::X16), *STI);
    }
  }
}

void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
  EmitHwasanMemaccessSymbols(M);

  const Triple &TT = TM.getTargetTriple();
  if (TT.isOSBinFormatMachO()) {
    // Funny Darwin hack: This flag tells the linker that no global symbols
    // contain code that falls through to other global symbols (e.g. the
    // obvious implementation of multiple entry points). If this doesn't
    // occur, the linker can safely perform dead code stripping. Since LLVM
    // never generates code that does this, it is always safe to set.
    OutStreamer->emitAssemblerFlag(MCAF_SubsectionsViaSymbols);
  }

  // Emit stack and fault map information.
  emitStackMaps(SM);
  FM.serializeToFaultMapSection();
}

void AArch64AsmPrinter::EmitLOHs() {
  SmallVector<MCSymbol *, 3> MCArgs;

  for (const auto &D : AArch64FI->getLOHContainer()) {
    for (const MachineInstr *MI : D.getArgs()) {
      MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
      assert(LabelIt != LOHInstToLabel.end() &&
             "Label hasn't been inserted for LOH related instruction");
      MCArgs.push_back(LabelIt->second);
    }
    OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
    MCArgs.clear();
  }
}

void AArch64AsmPrinter::emitFunctionBodyEnd() {
  if (!AArch64FI->getLOHRelated().empty())
    EmitLOHs();
}

/// GetCPISymbol - Return the symbol for the specified constant pool entry.
MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
  // Darwin uses a linker-private symbol name for constant-pools (to
  // avoid addends on the relocation?), ELF has no such concept and
  // uses a normal private symbol.
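  // On MachO the linker-private prefix is typically "l", giving names of the
  // form lCPI<function>_<id>.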
  if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
    return OutContext.getOrCreateSymbol(
        Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
        Twine(getFunctionNumber()) + "_" + Twine(CPID));

  return AsmPrinter::GetCPISymbol(CPID);
}

void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
                                     raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);
  switch (MO.getType()) {
  default:
    llvm_unreachable("<unknown operand type>");
  case MachineOperand::MO_Register: {
    Register Reg = MO.getReg();
    assert(Register::isPhysicalRegister(Reg));
    assert(!MO.getSubReg() && "Subregs should be eliminated!");
    O << AArch64InstPrinter::getRegisterName(Reg);
    break;
  }
  case MachineOperand::MO_Immediate: {
    O << MO.getImm();
    break;
  }
  case MachineOperand::MO_GlobalAddress: {
    PrintSymbolOperand(MO, O);
    break;
  }
  case MachineOperand::MO_BlockAddress: {
    MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
    Sym->print(O, MAI);
    break;
  }
  }
}

bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
                                          raw_ostream &O) {
  Register Reg = MO.getReg();
  switch (Mode) {
  default:
    return true; // Unknown mode.
  case 'w':
    Reg = getWRegFromXReg(Reg);
    break;
  case 'x':
    Reg = getXRegFromWReg(Reg);
    break;
  }

  O << AArch64InstPrinter::getRegisterName(Reg);
  return false;
}

// Prints the register in MO using class RC, indexed by the register's
// hardware encoding within that class. This should not be used for cross
// class printing.
bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
                                           const TargetRegisterClass *RC,
                                           unsigned AltName, raw_ostream &O) {
  assert(MO.isReg() && "Should only get here with a register!");
  const TargetRegisterInfo *RI = STI->getRegisterInfo();
  Register Reg = MO.getReg();
  unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
  if (!RI->regsOverlap(RegToPrint, Reg))
    return true;
  O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
  return false;
}

bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
                                        const char *ExtraCode, raw_ostream &O) {
  const MachineOperand &MO = MI->getOperand(OpNum);

  // First try the generic code, which knows about modifiers like 'c' and 'n'.
  if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
    return false;

  // Does this asm operand have a single letter operand modifier?
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      return true; // Unknown modifier.
    case 'w': // Print W register
    case 'x': // Print X register
      if (MO.isReg())
        return printAsmMRegister(MO, ExtraCode[0], O);
      if (MO.isImm() && MO.getImm() == 0) {
        unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
        O << AArch64InstPrinter::getRegisterName(Reg);
        return false;
      }
      printOperand(MI, OpNum, O);
      return false;
    case 'b': // Print B register.
    case 'h': // Print H register.
    case 's': // Print S register.
    case 'd': // Print D register.
    case 'q': // Print Q register.
    case 'z': // Print Z register.
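      // Map the modifier letter onto the matching FP/SVE register class and
      // print the operand using that class's register names.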
      if (MO.isReg()) {
        const TargetRegisterClass *RC;
        switch (ExtraCode[0]) {
        case 'b':
          RC = &AArch64::FPR8RegClass;
          break;
        case 'h':
          RC = &AArch64::FPR16RegClass;
          break;
        case 's':
          RC = &AArch64::FPR32RegClass;
          break;
        case 'd':
          RC = &AArch64::FPR64RegClass;
          break;
        case 'q':
          RC = &AArch64::FPR128RegClass;
          break;
        case 'z':
          RC = &AArch64::ZPRRegClass;
          break;
        default:
          return true;
        }
        return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
      }
      printOperand(MI, OpNum, O);
      return false;
    }
  }

  // According to ARM, we should emit x and v registers unless we have a
  // modifier.
  if (MO.isReg()) {
    Register Reg = MO.getReg();

    // If this is a w or x register, print an x register.
    if (AArch64::GPR32allRegClass.contains(Reg) ||
        AArch64::GPR64allRegClass.contains(Reg))
      return printAsmMRegister(MO, 'x', O);

    unsigned AltName = AArch64::NoRegAltName;
    const TargetRegisterClass *RegClass;
    if (AArch64::ZPRRegClass.contains(Reg)) {
      RegClass = &AArch64::ZPRRegClass;
    } else if (AArch64::PPRRegClass.contains(Reg)) {
      RegClass = &AArch64::PPRRegClass;
    } else {
      RegClass = &AArch64::FPR128RegClass;
      AltName = AArch64::vreg;
    }

    // If this is a b, h, s, d, or q register, print it as a v register.
    return printAsmRegInClass(MO, RegClass, AltName, O);
  }

  printOperand(MI, OpNum, O);
  return false;
}

bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
                                              unsigned OpNum,
                                              const char *ExtraCode,
                                              raw_ostream &O) {
  if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
    return true; // Unknown modifier.

  const MachineOperand &MO = MI->getOperand(OpNum);
  assert(MO.isReg() && "unexpected inline asm memory operand");
  O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
  return false;
}

void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
                                               raw_ostream &OS) {
  unsigned NOps = MI->getNumOperands();
  assert(NOps == 4);
  OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
  // cast away const; DIetc do not take const operands for some reason.
  OS << MI->getDebugVariable()->getName();
  OS << " <- ";
  // Frame address. Currently handles register +- offset only.
  assert(MI->getDebugOperand(0).isReg() && MI->isDebugOffsetImm());
  OS << '[';
  printOperand(MI, 0, OS);
  OS << '+';
  printOperand(MI, 1, OS);
  OS << ']';
  OS << "+";
  printOperand(MI, NOps - 2, OS);
}

void AArch64AsmPrinter::emitJumpTableInfo() {
  const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (!MJTI) return;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty()) return;

  const Function &F = MF->getFunction();
  const TargetLoweringObjectFile &TLOF = getObjFileLowering();
  bool JTInDiffSection =
      !STI->isTargetCOFF() ||
      !TLOF.shouldPutJumpTableInFunctionSection(
          MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32,
          F);
  if (JTInDiffSection) {
    // Drop it in the readonly section.
    MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
    OutStreamer->SwitchSection(ReadOnlySec);
  }

  auto AFI = MF->getInfo<AArch64FunctionInfo>();
  for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
    const std::vector<MachineBasicBlock *> &JTBBs = JT[JTI].MBBs;

    // If this jump table was deleted, ignore it.
    if (JTBBs.empty()) continue;

    unsigned Size = AFI->getJumpTableEntrySize(JTI);
    emitAlignment(Align(Size));
    OutStreamer->emitLabel(GetJTISymbol(JTI));

    const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
    const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);

    for (auto *JTBB : JTBBs) {
      const MCExpr *Value =
          MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);

      // Each entry is:
      //     .byte/.hword (LBB - Lbase)>>2
      // or plain:
      //     .word LBB - Lbase
      Value = MCBinaryExpr::createSub(Value, Base, OutContext);
      if (Size != 4)
        Value = MCBinaryExpr::createLShr(
            Value, MCConstantExpr::create(2, OutContext), OutContext);

      OutStreamer->emitValue(Value, Size);
    }
  }
}

void AArch64AsmPrinter::emitFunctionEntryLabel() {
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
      MF->getFunction().getCallingConv() ==
          CallingConv::AArch64_SVE_VectorCall ||
      STI->getRegisterInfo()->hasSVEArgsOrReturn(MF)) {
    auto *TS =
        static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
    TS->emitDirectiveVariantPCS(CurrentFnSym);
  }

  return AsmPrinter::emitFunctionEntryLabel();
}

/// Small jump tables contain an unsigned byte or half, representing the offset
/// from the lowest-addressed possible destination to the desired basic
/// block. Since all instructions are 4-byte aligned, this is further
/// compressed by counting in instructions rather than bytes (i.e. divided by
/// 4). So, to materialize the correct destination we need:
///
///   adr xDest, .LBB0_0
///   ldrb wScratch, [xTable, xEntry]   (with "lsl #1" for ldrh).
///   add xDest, xDest, xScratch        (with "lsl #2" for smaller entries)
void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
                                           const llvm::MachineInstr &MI) {
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register ScratchRegW =
      STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
  Register TableReg = MI.getOperand(2).getReg();
  Register EntryReg = MI.getOperand(3).getReg();
  int JTIdx = MI.getOperand(4).getIndex();
  int Size = AArch64FI->getJumpTableEntrySize(JTIdx);

  // This has to be first because the compression pass based its reachability
  // calculations on the start of the JumpTableDest instruction.
  auto Label =
      MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);

  // If we don't already have a symbol to use as the base, use the ADR
  // instruction itself.
  if (!Label) {
    Label = MF->getContext().createTempSymbol();
    AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
    OutStreamer.emitLabel(Label);
  }

  auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
  EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
                                  .addReg(DestReg)
                                  .addExpr(LabelExpr));

  // Load the number of instruction-steps to offset from the label.
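  // Byte and half-word entries hold (LBB - Lbase) >> 2 and are zero-extended;
  // 4-byte entries hold the raw label difference and are sign-extended (LDRSW).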
  unsigned LdrOpcode;
  switch (Size) {
  case 1: LdrOpcode = AArch64::LDRBBroX; break;
  case 2: LdrOpcode = AArch64::LDRHHroX; break;
  case 4: LdrOpcode = AArch64::LDRSWroX; break;
  default:
    llvm_unreachable("Unknown jump table size");
  }

  EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
                                  .addReg(Size == 4 ? ScratchReg : ScratchRegW)
                                  .addReg(TableReg)
                                  .addReg(EntryReg)
                                  .addImm(0)
                                  .addImm(Size == 1 ? 0 : 1));

  // Add to the already materialized base label address, multiplying by 4 if
  // compressed.
  EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
                                  .addReg(DestReg)
                                  .addReg(DestReg)
                                  .addReg(ScratchReg)
                                  .addImm(Size == 4 ? 0 : 2));
}

void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                                      const MachineInstr &MI) {
  unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();

  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);

  SM.recordStackMap(*MILabel, MI);
  assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");

  // Scan ahead to trim the shadow.
  const MachineBasicBlock &MBB = *MI.getParent();
  MachineBasicBlock::const_iterator MII(MI);
  ++MII;
  while (NumNOPBytes > 0) {
    if (MII == MBB.end() || MII->isCall() ||
        MII->getOpcode() == AArch64::DBG_VALUE ||
        MII->getOpcode() == TargetOpcode::PATCHPOINT ||
        MII->getOpcode() == TargetOpcode::STACKMAP)
      break;
    ++MII;
    NumNOPBytes -= 4;
  }

  // Emit nops.
  for (unsigned i = 0; i < NumNOPBytes; i += 4)
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
}

// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>
void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                        const MachineInstr &MI) {
  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);
  SM.recordPatchPoint(*MILabel, MI);

  PatchPointOpers Opers(&MI);

  int64_t CallTarget = Opers.getCallTarget().getImm();
  unsigned EncodedBytes = 0;
  if (CallTarget) {
    assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
           "High 16 bits of call target should be zero.");
    Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
    EncodedBytes = 16;
    // Materialize the jump address:
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
                                    .addReg(ScratchReg)
                                    .addImm((CallTarget >> 32) & 0xFFFF)
                                    .addImm(32));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
                                    .addReg(ScratchReg)
                                    .addReg(ScratchReg)
                                    .addImm((CallTarget >> 16) & 0xFFFF)
                                    .addImm(16));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
                                    .addReg(ScratchReg)
                                    .addReg(ScratchReg)
                                    .addImm(CallTarget & 0xFFFF)
                                    .addImm(0));
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
  }
  // Emit padding.
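  // Pad out to the requested patchpoint size with NOPs, 4 bytes at a time.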
  unsigned NumBytes = Opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");
  assert((NumBytes - EncodedBytes) % 4 == 0 &&
         "Invalid number of NOP bytes requested!");
  for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
    EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
}

void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                        const MachineInstr &MI) {
  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
    for (unsigned i = 0; i < PatchBytes; i += 4)
      EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
  } else {
    // Lower the call target and choose the correct opcode.
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
      CallOpcode = AArch64::BL;
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = AArch64::BL;
      break;
    case MachineOperand::MO_Register:
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = AArch64::BLR;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    EmitToStreamer(OutStreamer,
                   MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
  }

  auto &Ctx = OutStreamer.getContext();
  MCSymbol *MILabel = Ctx.createTempSymbol();
  OutStreamer.emitLabel(MILabel);
  SM.recordStatepoint(*MILabel, MI);
}

void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
  // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
  //                  <opcode>, <operands>

  Register DefRegister = FaultingMI.getOperand(0).getReg();
  FaultMaps::FaultKind FK =
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  unsigned OperandsBeginIdx = 4;

  auto &Ctx = OutStreamer->getContext();
  MCSymbol *FaultingLabel = Ctx.createTempSymbol();
  OutStreamer->emitLabel(FaultingLabel);

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);

  MCInst MI;
  MI.setOpcode(Opcode);

  if (DefRegister != (Register)0)
    MI.addOperand(MCOperand::createReg(DefRegister));

  for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx,
            E = FaultingMI.operands_end();
       I != E; ++I) {
    MCOperand Dest;
    lowerOperand(*I, Dest);
    MI.addOperand(Dest);
  }

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  OutStreamer->emitInstruction(MI, getSubtargetInfo());
}

void AArch64AsmPrinter::EmitFMov0(const MachineInstr &MI) {
  Register DestReg = MI.getOperand(0).getReg();
  if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround()) {
    // Convert H/S/D register to corresponding Q register.
    if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
      DestReg = AArch64::Q0 + (DestReg - AArch64::H0);
    else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
      DestReg = AArch64::Q0 + (DestReg - AArch64::S0);
    else {
      assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
      DestReg = AArch64::Q0 + (DestReg - AArch64::D0);
    }
    MCInst MOVI;
    MOVI.setOpcode(AArch64::MOVIv2d_ns);
    MOVI.addOperand(MCOperand::createReg(DestReg));
    MOVI.addOperand(MCOperand::createImm(0));
    EmitToStreamer(*OutStreamer, MOVI);
  } else {
    MCInst FMov;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("Unexpected opcode");
    case AArch64::FMOVH0:
      FMov.setOpcode(AArch64::FMOVWHr);
      FMov.addOperand(MCOperand::createReg(DestReg));
      FMov.addOperand(MCOperand::createReg(AArch64::WZR));
      break;
    case AArch64::FMOVS0:
      FMov.setOpcode(AArch64::FMOVWSr);
      FMov.addOperand(MCOperand::createReg(DestReg));
      FMov.addOperand(MCOperand::createReg(AArch64::WZR));
      break;
    case AArch64::FMOVD0:
      FMov.setOpcode(AArch64::FMOVXDr);
      FMov.addOperand(MCOperand::createReg(DestReg));
      FMov.addOperand(MCOperand::createReg(AArch64::XZR));
      break;
    }
    EmitToStreamer(*OutStreamer, FMov);
  }
}

// Simple pseudo-instructions have their lowering (with expansion to real
// instructions) auto-generated.
#include "AArch64GenMCPseudoLowering.inc"

void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
  // Do any auto-generated pseudo lowerings.
  if (emitPseudoExpansionLowering(*OutStreamer, MI))
    return;

  if (AArch64FI->getLOHRelated().count(MI)) {
    // Generate a label for LOH related instruction.
    MCSymbol *LOHLabel = createTempSymbol("loh");
    // Associate the instruction with the label.
    LOHInstToLabel[MI] = LOHLabel;
    OutStreamer->emitLabel(LOHLabel);
  }

  AArch64TargetStreamer *TS =
      static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
  // Do any manual lowerings.
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::HINT: {
    // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
    // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
    // non-empty. If MI is the initial BTI, place the
    // __patchable_function_entries label after BTI.
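    // BTI c, BTI j and BTI jc are encoded as HINT #34, #36 and #38
    // respectively, which is what the (Imm & 32) && (Imm & 6) test below
    // recognizes.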
    if (CurrentPatchableFunctionEntrySym &&
        CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
        MI == &MF->front().front()) {
      int64_t Imm = MI->getOperand(0).getImm();
      if ((Imm & 32) && (Imm & 6)) {
        MCInst Inst;
        MCInstLowering.Lower(MI, Inst);
        EmitToStreamer(*OutStreamer, Inst);
        CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
        OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
        return;
      }
    }
    break;
  }
  case AArch64::MOVMCSym: {
    Register DestReg = MI->getOperand(0).getReg();
    const MachineOperand &MO_Sym = MI->getOperand(1);
    MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
    MCOperand Hi_MCSym, Lo_MCSym;

    Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
    Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);

    MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
    MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);

    MCInst MovZ;
    MovZ.setOpcode(AArch64::MOVZXi);
    MovZ.addOperand(MCOperand::createReg(DestReg));
    MovZ.addOperand(Hi_MCSym);
    MovZ.addOperand(MCOperand::createImm(16));
    EmitToStreamer(*OutStreamer, MovZ);

    MCInst MovK;
    MovK.setOpcode(AArch64::MOVKXi);
    MovK.addOperand(MCOperand::createReg(DestReg));
    MovK.addOperand(MCOperand::createReg(DestReg));
    MovK.addOperand(Lo_MCSym);
    MovK.addOperand(MCOperand::createImm(0));
    EmitToStreamer(*OutStreamer, MovK);
    return;
  }
  case AArch64::MOVIv2d_ns:
    // If the target has <rdar://problem/16473581>, lower this
    // instruction to movi.16b instead.
    if (STI->hasZeroCycleZeroingFPWorkaround() &&
        MI->getOperand(1).getImm() == 0) {
      MCInst TmpInst;
      TmpInst.setOpcode(AArch64::MOVIv16b_ns);
      TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
      TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
      EmitToStreamer(*OutStreamer, TmpInst);
      return;
    }
    break;

  case AArch64::DBG_VALUE: {
    if (isVerbose() && OutStreamer->hasRawTextSupport()) {
      SmallString<128> TmpStr;
      raw_svector_ostream OS(TmpStr);
      PrintDebugValueComment(MI, OS);
      OutStreamer->emitRawText(StringRef(OS.str()));
    }
    return;
  }

  case AArch64::EMITBKEY: {
    ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
    if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
        ExceptionHandlingType != ExceptionHandling::ARM)
      return;

    if (needsCFIMoves() == CFI_M_None)
      return;

    OutStreamer->emitCFIBKeyFrame();
    return;
  }

  // Tail calls use pseudo instructions so they have the proper code-gen
  // attributes (isCall, isReturn, etc.). We lower them to the real
  // instruction here.
  case AArch64::TCRETURNri:
  case AArch64::TCRETURNriBTI:
  case AArch64::TCRETURNriALL: {
    MCInst TmpInst;
    TmpInst.setOpcode(AArch64::BR);
    TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
    EmitToStreamer(*OutStreamer, TmpInst);
    return;
  }
  case AArch64::TCRETURNdi: {
    MCOperand Dest;
    MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
    MCInst TmpInst;
    TmpInst.setOpcode(AArch64::B);
    TmpInst.addOperand(Dest);
    EmitToStreamer(*OutStreamer, TmpInst);
    return;
  }
  case AArch64::SpeculationBarrierISBDSBEndBB: {
    // Print DSB SYS + ISB.
    MCInst TmpInstDSB;
    TmpInstDSB.setOpcode(AArch64::DSB);
    TmpInstDSB.addOperand(MCOperand::createImm(0xf));
    EmitToStreamer(*OutStreamer, TmpInstDSB);
    MCInst TmpInstISB;
    TmpInstISB.setOpcode(AArch64::ISB);
    TmpInstISB.addOperand(MCOperand::createImm(0xf));
    EmitToStreamer(*OutStreamer, TmpInstISB);
    return;
  }
  case AArch64::SpeculationBarrierSBEndBB: {
    // Print SB.
    MCInst TmpInstSB;
    TmpInstSB.setOpcode(AArch64::SB);
    EmitToStreamer(*OutStreamer, TmpInstSB);
    return;
  }
  case AArch64::TLSDESC_CALLSEQ: {
    /// lower this to:
    ///    adrp  x0, :tlsdesc:var
    ///    ldr   x1, [x0, #:tlsdesc_lo12:var]
    ///    add   x0, x0, #:tlsdesc_lo12:var
    ///    .tlsdesccall var
    ///    blr   x1
    ///    (TPIDR_EL0 offset now in x0)
    const MachineOperand &MO_Sym = MI->getOperand(0);
    MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
    MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
    MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
    MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
    MCInstLowering.lowerOperand(MO_Sym, Sym);
    MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
    MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);

    MCInst Adrp;
    Adrp.setOpcode(AArch64::ADRP);
    Adrp.addOperand(MCOperand::createReg(AArch64::X0));
    Adrp.addOperand(SymTLSDesc);
    EmitToStreamer(*OutStreamer, Adrp);

    MCInst Ldr;
    if (STI->isTargetILP32()) {
      Ldr.setOpcode(AArch64::LDRWui);
      Ldr.addOperand(MCOperand::createReg(AArch64::W1));
    } else {
      Ldr.setOpcode(AArch64::LDRXui);
      Ldr.addOperand(MCOperand::createReg(AArch64::X1));
    }
    Ldr.addOperand(MCOperand::createReg(AArch64::X0));
    Ldr.addOperand(SymTLSDescLo12);
    Ldr.addOperand(MCOperand::createImm(0));
    EmitToStreamer(*OutStreamer, Ldr);

    MCInst Add;
    if (STI->isTargetILP32()) {
      Add.setOpcode(AArch64::ADDWri);
      Add.addOperand(MCOperand::createReg(AArch64::W0));
      Add.addOperand(MCOperand::createReg(AArch64::W0));
    } else {
      Add.setOpcode(AArch64::ADDXri);
      Add.addOperand(MCOperand::createReg(AArch64::X0));
      Add.addOperand(MCOperand::createReg(AArch64::X0));
    }
    Add.addOperand(SymTLSDescLo12);
    Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
    EmitToStreamer(*OutStreamer, Add);

    // Emit a relocation-annotation. This expands to no code, but requests
    // the following instruction gets an R_AARCH64_TLSDESC_CALL.
    MCInst TLSDescCall;
    TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
    TLSDescCall.addOperand(Sym);
    EmitToStreamer(*OutStreamer, TLSDescCall);

    MCInst Blr;
    Blr.setOpcode(AArch64::BLR);
    Blr.addOperand(MCOperand::createReg(AArch64::X1));
    EmitToStreamer(*OutStreamer, Blr);

    return;
  }

  case AArch64::JumpTableDest32:
  case AArch64::JumpTableDest16:
  case AArch64::JumpTableDest8:
    LowerJumpTableDest(*OutStreamer, *MI);
    return;

  case AArch64::FMOVH0:
  case AArch64::FMOVS0:
  case AArch64::FMOVD0:
    EmitFMov0(*MI);
    return;

  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(*OutStreamer, SM, *MI);

  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(*OutStreamer, SM, *MI);

  case TargetOpcode::STATEPOINT:
    return LowerSTATEPOINT(*OutStreamer, SM, *MI);

  case TargetOpcode::FAULTING_OP:
    return LowerFAULTING_OP(*MI);

  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
    LowerPATCHABLE_FUNCTION_ENTER(*MI);
    return;

  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
    LowerPATCHABLE_FUNCTION_EXIT(*MI);
    return;

  case TargetOpcode::PATCHABLE_TAIL_CALL:
    LowerPATCHABLE_TAIL_CALL(*MI);
    return;

  case AArch64::HWASAN_CHECK_MEMACCESS:
  case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
    LowerHWASAN_CHECK_MEMACCESS(*MI);
    return;

  case AArch64::SEH_StackAlloc:
    TS->EmitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
    return;

  case AArch64::SEH_SaveFPLR:
    TS->EmitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
    return;

  case AArch64::SEH_SaveFPLR_X:
    assert(MI->getOperand(0).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->EmitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
    return;

  case AArch64::SEH_SaveReg:
    TS->EmitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
                               MI->getOperand(1).getImm());
    return;

  case AArch64::SEH_SaveReg_X:
    assert(MI->getOperand(1).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->EmitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
                                -MI->getOperand(1).getImm());
    return;

  case AArch64::SEH_SaveRegP:
    if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
        MI->getOperand(0).getImm() <= 28) {
      assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
             "Register paired with LR must be odd");
      TS->EmitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
                                    MI->getOperand(2).getImm());
      return;
    }
    assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
           "Non-consecutive registers not allowed for save_regp");
    TS->EmitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
                                MI->getOperand(2).getImm());
    return;

  case AArch64::SEH_SaveRegP_X:
    assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
           "Non-consecutive registers not allowed for save_regp_x");
    assert(MI->getOperand(2).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->EmitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
                                 -MI->getOperand(2).getImm());
    return;

  case AArch64::SEH_SaveFReg:
    TS->EmitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
                                MI->getOperand(1).getImm());
    return;

  case AArch64::SEH_SaveFReg_X:
    assert(MI->getOperand(1).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->EmitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
                                 -MI->getOperand(1).getImm());
    return;

  case AArch64::SEH_SaveFRegP:
    assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
           "Non-consecutive registers not allowed for save_fregp");
    TS->EmitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
                                 MI->getOperand(2).getImm());
    return;

  case AArch64::SEH_SaveFRegP_X:
    assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
           "Non-consecutive registers not allowed for save_fregp_x");
    assert(MI->getOperand(2).getImm() < 0 &&
           "Pre increment SEH opcode must have a negative offset");
    TS->EmitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
                                  -MI->getOperand(2).getImm());
    return;

  case AArch64::SEH_SetFP:
    TS->EmitARM64WinCFISetFP();
    return;

  case AArch64::SEH_AddFP:
    TS->EmitARM64WinCFIAddFP(MI->getOperand(0).getImm());
    return;

  case AArch64::SEH_Nop:
    TS->EmitARM64WinCFINop();
    return;

  case AArch64::SEH_PrologEnd:
    TS->EmitARM64WinCFIPrologEnd();
    return;

  case AArch64::SEH_EpilogStart:
    TS->EmitARM64WinCFIEpilogStart();
    return;

  case AArch64::SEH_EpilogEnd:
    TS->EmitARM64WinCFIEpilogEnd();
    return;
  }

  // Finally, do the automated lowerings for everything else.
  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);
  EmitToStreamer(*OutStreamer, TmpInst);
}

// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
  RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
  RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
  RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
  RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64_32Target());
  RegisterAsmPrinter<AArch64AsmPrinter> V(getTheAArch64_32Target());
}