1 //===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a printer that converts from our internal representation
10 // of machine-dependent LLVM code to the AArch64 assembly language.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AArch64.h"
15 #include "AArch64MCInstLower.h"
16 #include "AArch64MachineFunctionInfo.h"
17 #include "AArch64RegisterInfo.h"
18 #include "AArch64Subtarget.h"
19 #include "AArch64TargetObjectFile.h"
20 #include "MCTargetDesc/AArch64AddressingModes.h"
21 #include "MCTargetDesc/AArch64InstPrinter.h"
22 #include "MCTargetDesc/AArch64MCExpr.h"
23 #include "MCTargetDesc/AArch64MCTargetDesc.h"
24 #include "MCTargetDesc/AArch64TargetStreamer.h"
25 #include "TargetInfo/AArch64TargetInfo.h"
26 #include "Utils/AArch64BaseInfo.h"
27 #include "llvm/ADT/SmallString.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/ADT/Twine.h"
31 #include "llvm/BinaryFormat/COFF.h"
32 #include "llvm/BinaryFormat/ELF.h"
33 #include "llvm/BinaryFormat/MachO.h"
34 #include "llvm/CodeGen/AsmPrinter.h"
35 #include "llvm/CodeGen/FaultMaps.h"
36 #include "llvm/CodeGen/MachineBasicBlock.h"
37 #include "llvm/CodeGen/MachineFunction.h"
38 #include "llvm/CodeGen/MachineInstr.h"
39 #include "llvm/CodeGen/MachineJumpTableInfo.h"
40 #include "llvm/CodeGen/MachineModuleInfoImpls.h"
41 #include "llvm/CodeGen/MachineOperand.h"
42 #include "llvm/CodeGen/StackMaps.h"
43 #include "llvm/CodeGen/TargetRegisterInfo.h"
44 #include "llvm/IR/DataLayout.h"
45 #include "llvm/IR/DebugInfoMetadata.h"
46 #include "llvm/MC/MCAsmInfo.h"
47 #include "llvm/MC/MCContext.h"
48 #include "llvm/MC/MCInst.h"
49 #include "llvm/MC/MCInstBuilder.h"
50 #include "llvm/MC/MCSectionELF.h"
51 #include "llvm/MC/MCSectionMachO.h"
52 #include "llvm/MC/MCStreamer.h"
53 #include "llvm/MC/MCSymbol.h"
54 #include "llvm/MC/TargetRegistry.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/ErrorHandling.h"
58 #include "llvm/Support/raw_ostream.h"
59 #include "llvm/Target/TargetMachine.h"
60 #include "llvm/TargetParser/Triple.h"
61 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
62 #include <algorithm>
63 #include <cassert>
64 #include <cstdint>
65 #include <map>
66 #include <memory>
67 
68 using namespace llvm;
69 
70 #define DEBUG_TYPE "asm-printer"
71 
72 namespace {
73 
74 class AArch64AsmPrinter : public AsmPrinter {
75   AArch64MCInstLower MCInstLowering;
76   FaultMaps FM;
77   const AArch64Subtarget *STI;
78   bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
79 
80 public:
81   AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
82       : AsmPrinter(TM, std::move(Streamer)), MCInstLowering(OutContext, *this),
83         FM(*this) {}
84 
85   StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
86 
87   /// Wrapper for MCInstLowering.lowerOperand() for the
88   /// tblgen'erated pseudo lowering.
89   bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
90     return MCInstLowering.lowerOperand(MO, MCOp);
91   }
92 
93   void emitStartOfAsmFile(Module &M) override;
94   void emitJumpTableInfo() override;
95   std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
96              codeview::JumpTableEntrySize>
97   getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
98                            const MCSymbol *BranchLabel) const override;
99 
100   void emitFunctionEntryLabel() override;
101 
102   void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
103 
104   void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
105 
106   void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
107                      const MachineInstr &MI);
108   void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
109                        const MachineInstr &MI);
110   void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
111                        const MachineInstr &MI);
112   void LowerFAULTING_OP(const MachineInstr &MI);
113 
114   void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
115   void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
116   void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
117   void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
118 
119   typedef std::tuple<unsigned, bool, uint32_t> HwasanMemaccessTuple;
120   std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
121   void LowerKCFI_CHECK(const MachineInstr &MI);
122   void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
123   void emitHwasanMemaccessSymbols(Module &M);
124 
125   void emitSled(const MachineInstr &MI, SledKind Kind);
126 
127   /// tblgen'erated driver function for lowering simple MI->MC
128   /// pseudo instructions.
129   bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
130                                    const MachineInstr *MI);
131 
132   void emitInstruction(const MachineInstr *MI) override;
133 
134   void emitFunctionHeaderComment() override;
135 
136   void getAnalysisUsage(AnalysisUsage &AU) const override {
137     AsmPrinter::getAnalysisUsage(AU);
138     AU.setPreservesAll();
139   }
140 
141   bool runOnMachineFunction(MachineFunction &MF) override {
142     AArch64FI = MF.getInfo<AArch64FunctionInfo>();
143     STI = &MF.getSubtarget<AArch64Subtarget>();
144 
145     SetupMachineFunction(MF);
146 
147     if (STI->isTargetCOFF()) {
148       bool Local = MF.getFunction().hasLocalLinkage();
149       COFF::SymbolStorageClass Scl =
150           Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL;
151       int Type =
152         COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
153 
154       OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
155       OutStreamer->emitCOFFSymbolStorageClass(Scl);
156       OutStreamer->emitCOFFSymbolType(Type);
157       OutStreamer->endCOFFSymbolDef();
158     }
159 
160     // Emit the rest of the function body.
161     emitFunctionBody();
162 
163     // Emit the XRay table for this function.
164     emitXRayTable();
165 
166     // We didn't modify anything.
167     return false;
168   }
169 
170   const MCExpr *lowerConstant(const Constant *CV) override;
171 
172 private:
173   void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
174   bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
175   bool printAsmRegInClass(const MachineOperand &MO,
176                           const TargetRegisterClass *RC, unsigned AltName,
177                           raw_ostream &O);
178 
179   bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
180                        const char *ExtraCode, raw_ostream &O) override;
181   bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
182                              const char *ExtraCode, raw_ostream &O) override;
183 
184   void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
185 
186   void emitFunctionBodyEnd() override;
187 
188   MCSymbol *GetCPISymbol(unsigned CPID) const override;
189   void emitEndOfAsmFile(Module &M) override;
190 
191   AArch64FunctionInfo *AArch64FI = nullptr;
192 
193   /// Emit the LOHs contained in AArch64FI.
194   void emitLOHs();
195 
196   /// Emit instruction to set float register to zero.
197   void emitFMov0(const MachineInstr &MI);
198 
199   using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
200 
201   MInstToMCSymbol LOHInstToLabel;
202 
203   bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
204     return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
205   }
206 
207   const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
208     assert(STI);
209     return STI;
210   }
211   void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
212                               MCSymbol *LazyPointer) override;
213   void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
214                                     MCSymbol *LazyPointer) override;
215 };
216 
217 } // end anonymous namespace
218 
219 void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
220   const Triple &TT = TM.getTargetTriple();
221 
222   if (TT.isOSBinFormatCOFF()) {
223     // Emit an absolute @feat.00 symbol
224     MCSymbol *S = MMI->getContext().getOrCreateSymbol(StringRef("@feat.00"));
225     OutStreamer->beginCOFFSymbolDef(S);
226     OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_STATIC);
227     OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_NULL);
228     OutStreamer->endCOFFSymbolDef();
229     int64_t Feat00Value = 0;
230 
231     if (M.getModuleFlag("cfguard")) {
232       // Object is CFG-aware.
233       Feat00Value |= COFF::Feat00Flags::GuardCF;
234     }
235 
236     if (M.getModuleFlag("ehcontguard")) {
237       // Object also has EHCont.
238       Feat00Value |= COFF::Feat00Flags::GuardEHCont;
239     }
240 
241     if (M.getModuleFlag("ms-kernel")) {
242       // Object is compiled with /kernel.
243       Feat00Value |= COFF::Feat00Flags::Kernel;
244     }
245 
246     OutStreamer->emitSymbolAttribute(S, MCSA_Global);
247     OutStreamer->emitAssignment(
248         S, MCConstantExpr::create(Feat00Value, MMI->getContext()));
249   }
250 
251   if (!TT.isOSBinFormatELF())
252     return;
253 
254   // Assemble feature flags that may require creation of a note section.
255   unsigned Flags = 0;
256   if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
257           M.getModuleFlag("branch-target-enforcement")))
258     if (BTE->getZExtValue())
259       Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
260 
261   if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
262           M.getModuleFlag("guarded-control-stack")))
263     if (GCS->getZExtValue())
264       Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
265 
266   if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
267           M.getModuleFlag("sign-return-address")))
268     if (Sign->getZExtValue())
269       Flags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
270 
271   if (Flags == 0)
272     return;
273 
274   // Emit a .note.gnu.property section with the flags.
275   auto *TS =
276       static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
277   TS->emitNoteSection(Flags);
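  // For example (illustrative), a module with both BTI and PAC enabled ends up
  // with Flags == 3 (BTI = 1, PAC = 2, GCS = 4), emitted as the
  // GNU_PROPERTY_AARCH64_FEATURE_1_AND property of the .note.gnu.property note.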
278 }
279 
280 void AArch64AsmPrinter::emitFunctionHeaderComment() {
281   const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
282   std::optional<std::string> OutlinerString = FI->getOutliningStyle();
283   if (OutlinerString != std::nullopt)
284     OutStreamer->getCommentOS() << ' ' << OutlinerString;
285 }
286 
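// If the function requests "patchable-function-entry"="N", emit N NOPs at the
// entry instead of an XRay sled (e.g. N = 2 yields two 4-byte NOPs); otherwise
// emit a full FUNCTION_ENTER sled.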
287 void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
288 {
289   const Function &F = MF->getFunction();
290   if (F.hasFnAttribute("patchable-function-entry")) {
291     unsigned Num;
292     if (F.getFnAttribute("patchable-function-entry")
293             .getValueAsString()
294             .getAsInteger(10, Num))
295       return;
296     emitNops(Num);
297     return;
298   }
299 
300   emitSled(MI, SledKind::FUNCTION_ENTER);
301 }
302 
303 void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
304   emitSled(MI, SledKind::FUNCTION_EXIT);
305 }
306 
307 void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
308   emitSled(MI, SledKind::TAIL_CALL);
309 }
310 
311 void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
312   static const int8_t NoopsInSledCount = 7;
313   // We want to emit the following pattern:
314   //
315   // .Lxray_sled_N:
316   //   ALIGN
317   //   B #32
318   //   ; 7 NOP instructions (28 bytes)
319   // .tmpN
320   //
321   // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
322   // over the full 32 bytes (8 instructions) with the following pattern:
323   //
324   //   STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
325   //   LDR W17, #12 ; W17 := function ID
326   //   LDR X16, #12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
327   //   BLR X16 ; call the tracing trampoline
328   //   ;DATA: 32 bits of function ID
329   //   ;DATA: lower 32 bits of the address of the trampoline
330   //   ;DATA: higher 32 bits of the address of the trampoline
331   //   LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
332   //
333   OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
334   auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
335   OutStreamer->emitLabel(CurSled);
336   auto Target = OutContext.createTempSymbol();
337 
338   // Emit a "B #32" instruction, which jumps over the next 28 bytes.
339   // The operand has to be the number of 4-byte instructions to jump over,
340   // including the current instruction.
341   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
342 
343   for (int8_t I = 0; I < NoopsInSledCount; I++)
344     EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
345 
346   OutStreamer->emitLabel(Target);
347   recordSled(CurSled, MI, Kind, 2);
348 }
349 
350 // Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
351 // (built-in functions __xray_customevent/__xray_typedevent).
352 //
353 // .Lxray_event_sled_N:
354 //   b 1f
355 //   save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
356 //   set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
357 //   bl __xray_CustomEvent or __xray_TypedEvent
358 //   restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
359 // 1:
360 //
361 // There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
362 //
363 // Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
364 // After patching, b .+N will become a nop.
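// Note on the stack adjustments below: the STPXpre/LDPXpost immediates are
// scaled by 8 bytes, so #-2/#2 moves SP by 16 bytes (custom event) and #-4/#4
// by 32 bytes (typed event, leaving [sp, #16] for the extra STR/LDR of x2).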
365 void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
366                                                   bool Typed) {
367   auto &O = *OutStreamer;
368   MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
369   O.emitLabel(CurSled);
370   MCInst MovX0Op0 = MCInstBuilder(AArch64::ORRXrs)
371                         .addReg(AArch64::X0)
372                         .addReg(AArch64::XZR)
373                         .addReg(MI.getOperand(0).getReg())
374                         .addImm(0);
375   MCInst MovX1Op1 = MCInstBuilder(AArch64::ORRXrs)
376                         .addReg(AArch64::X1)
377                         .addReg(AArch64::XZR)
378                         .addReg(MI.getOperand(1).getReg())
379                         .addImm(0);
380   bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
381   auto *Sym = MCSymbolRefExpr::create(
382       OutContext.getOrCreateSymbol(
383           Twine(MachO ? "_" : "") +
384           (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
385       OutContext);
386   if (Typed) {
387     O.AddComment("Begin XRay typed event");
388     EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
389     EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
390                           .addReg(AArch64::SP)
391                           .addReg(AArch64::X0)
392                           .addReg(AArch64::X1)
393                           .addReg(AArch64::SP)
394                           .addImm(-4));
395     EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
396                           .addReg(AArch64::X2)
397                           .addReg(AArch64::SP)
398                           .addImm(2));
399     EmitToStreamer(O, MovX0Op0);
400     EmitToStreamer(O, MovX1Op1);
401     EmitToStreamer(O, MCInstBuilder(AArch64::ORRXrs)
402                           .addReg(AArch64::X2)
403                           .addReg(AArch64::XZR)
404                           .addReg(MI.getOperand(2).getReg())
405                           .addImm(0));
406     EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
407     EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
408                           .addReg(AArch64::X2)
409                           .addReg(AArch64::SP)
410                           .addImm(2));
411     O.AddComment("End XRay typed event");
412     EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
413                           .addReg(AArch64::SP)
414                           .addReg(AArch64::X0)
415                           .addReg(AArch64::X1)
416                           .addReg(AArch64::SP)
417                           .addImm(4));
418 
419     recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
420   } else {
421     O.AddComment("Begin XRay custom event");
422     EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
423     EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
424                           .addReg(AArch64::SP)
425                           .addReg(AArch64::X0)
426                           .addReg(AArch64::X1)
427                           .addReg(AArch64::SP)
428                           .addImm(-2));
429     EmitToStreamer(O, MovX0Op0);
430     EmitToStreamer(O, MovX1Op1);
431     EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
432     O.AddComment("End XRay custom event");
433     EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
434                           .addReg(AArch64::SP)
435                           .addReg(AArch64::X0)
436                           .addReg(AArch64::X1)
437                           .addReg(AArch64::SP)
438                           .addImm(2));
439 
440     recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
441   }
442 }
443 
444 void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
445   Register AddrReg = MI.getOperand(0).getReg();
446   assert(std::next(MI.getIterator())->isCall() &&
447          "KCFI_CHECK not followed by a call instruction");
448   assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
449          "KCFI_CHECK call target doesn't match call operand");
450 
451   // Default to using the intra-procedure-call temporary registers for
452   // comparing the hashes.
453   unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
454   if (AddrReg == AArch64::XZR) {
455     // Checking XZR makes no sense. Instead of emitting a load, zero
456     // ScratchRegs[0] and use it for the ESR AddrIndex below.
457     AddrReg = getXRegFromWReg(ScratchRegs[0]);
458     EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
459                                      .addReg(AddrReg)
460                                      .addReg(AArch64::XZR)
461                                      .addReg(AArch64::XZR)
462                                      .addImm(0));
463   } else {
464     // If one of the scratch registers is used for the call target (e.g.
465     // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
466     // temporary register instead (in this case, AArch64::W9) as the check
467     // is immediately followed by the call instruction.
468     for (auto &Reg : ScratchRegs) {
469       if (Reg == getWRegFromXReg(AddrReg)) {
470         Reg = AArch64::W9;
471         break;
472       }
473     }
474     assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
475            "Invalid scratch registers for KCFI_CHECK");
476 
477     // Adjust the offset for patchable-function-prefix. This assumes that
478     // patchable-function-prefix is the same for all functions.
479     int64_t PrefixNops = 0;
480     (void)MI.getMF()
481         ->getFunction()
482         .getFnAttribute("patchable-function-prefix")
483         .getValueAsString()
484         .getAsInteger(10, PrefixNops);
485 
486     // Load the target function type hash.
487     EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
488                                      .addReg(ScratchRegs[0])
489                                      .addReg(AddrReg)
490                                      .addImm(-(PrefixNops * 4 + 4)));
491   }
492 
493   // Load the expected type hash.
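  // For example (illustrative), Type = 0x12345678 with the default w17 scratch
  // expands to:
  //   movk w17, #0x5678
  //   movk w17, #0x1234, lsl #16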
494   const int64_t Type = MI.getOperand(1).getImm();
495   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKWi)
496                                    .addReg(ScratchRegs[1])
497                                    .addReg(ScratchRegs[1])
498                                    .addImm(Type & 0xFFFF)
499                                    .addImm(0));
500   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::MOVKWi)
501                                    .addReg(ScratchRegs[1])
502                                    .addReg(ScratchRegs[1])
503                                    .addImm((Type >> 16) & 0xFFFF)
504                                    .addImm(16));
505 
506   // Compare the hashes and trap if there's a mismatch.
507   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
508                                    .addReg(AArch64::WZR)
509                                    .addReg(ScratchRegs[0])
510                                    .addReg(ScratchRegs[1])
511                                    .addImm(0));
512 
513   MCSymbol *Pass = OutContext.createTempSymbol();
514   EmitToStreamer(*OutStreamer,
515                  MCInstBuilder(AArch64::Bcc)
516                      .addImm(AArch64CC::EQ)
517                      .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
518 
519   // The base ESR is 0x8000 and the register information is encoded in bits
520   // 0-9 as follows:
521   // - 0-4: n, where the register Xn contains the target address
522   // - 5-9: m, where the register Wm contains the expected type hash
523   // Where n, m are in [0, 30].
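  // For example, a target address in X1 checked against a hash in W17 (the
  // default scratch) encodes as ESR = 0x8000 | (17 << 5) | 1 = 0x8221.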
524   unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
525   unsigned AddrIndex;
526   switch (AddrReg) {
527   default:
528     AddrIndex = AddrReg - AArch64::X0;
529     break;
530   case AArch64::FP:
531     AddrIndex = 29;
532     break;
533   case AArch64::LR:
534     AddrIndex = 30;
535     break;
536   }
537 
538   assert(AddrIndex < 31 && TypeIndex < 31);
539 
540   unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
541   EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
542   OutStreamer->emitLabel(Pass);
543 }
544 
545 void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
546   Register Reg = MI.getOperand(0).getReg();
547   bool IsShort =
548       MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES;
549   uint32_t AccessInfo = MI.getOperand(1).getImm();
550   MCSymbol *&Sym =
551       HwasanMemaccessSymbols[HwasanMemaccessTuple(Reg, IsShort, AccessInfo)];
552   if (!Sym) {
553     // FIXME: Make this work on non-ELF.
554     if (!TM.getTargetTriple().isOSBinFormatELF())
555       report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
556 
557     std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
558                           utostr(AccessInfo);
559     if (IsShort)
560       SymName += "_short_v2";
561     Sym = OutContext.getOrCreateSymbol(SymName);
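    // e.g. "__hwasan_check_x1_34_short_v2" for a short-granules check of X1
    // with AccessInfo 34 (values illustrative).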
562   }
563 
564   EmitToStreamer(*OutStreamer,
565                  MCInstBuilder(AArch64::BL)
566                      .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
567 }
568 
569 void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
570   if (HwasanMemaccessSymbols.empty())
571     return;
572 
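  // For each (register, short-granules, access-info) combination recorded
  // above, emit one weak, comdat, outlined check routine. Roughly: extract the
  // tag from the pointer's top byte, load the matching shadow byte, return if
  // they agree, and otherwise fall through to a slow path that tail-calls
  // __hwasan_tag_mismatch (or __hwasan_tag_mismatch_v2 for short granules).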
573   const Triple &TT = TM.getTargetTriple();
574   assert(TT.isOSBinFormatELF());
575   std::unique_ptr<MCSubtargetInfo> STI(
576       TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
577   assert(STI && "Unable to create subtarget info");
578 
579   MCSymbol *HwasanTagMismatchV1Sym =
580       OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
581   MCSymbol *HwasanTagMismatchV2Sym =
582       OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
583 
584   const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
585       MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
586   const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
587       MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
588 
589   for (auto &P : HwasanMemaccessSymbols) {
590     unsigned Reg = std::get<0>(P.first);
591     bool IsShort = std::get<1>(P.first);
592     uint32_t AccessInfo = std::get<2>(P.first);
593     const MCSymbolRefExpr *HwasanTagMismatchRef =
594         IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
595     MCSymbol *Sym = P.second;
596 
597     bool HasMatchAllTag =
598         (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
599     uint8_t MatchAllTag =
600         (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
601     unsigned Size =
602         1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
603     bool CompileKernel =
604         (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
605 
606     OutStreamer->switchSection(OutContext.getELFSection(
607         ".text.hot", ELF::SHT_PROGBITS,
608         ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
609         /*IsComdat=*/true));
610 
611     OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
612     OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
613     OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
614     OutStreamer->emitLabel(Sym);
615 
616     OutStreamer->emitInstruction(MCInstBuilder(AArch64::SBFMXri)
617                                      .addReg(AArch64::X16)
618                                      .addReg(Reg)
619                                      .addImm(4)
620                                      .addImm(55),
621                                  *STI);
622     OutStreamer->emitInstruction(
623         MCInstBuilder(AArch64::LDRBBroX)
624             .addReg(AArch64::W16)
625             .addReg(IsShort ? AArch64::X20 : AArch64::X9)
626             .addReg(AArch64::X16)
627             .addImm(0)
628             .addImm(0),
629         *STI);
630     OutStreamer->emitInstruction(
631         MCInstBuilder(AArch64::SUBSXrs)
632             .addReg(AArch64::XZR)
633             .addReg(AArch64::X16)
634             .addReg(Reg)
635             .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
636         *STI);
637     MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
638     OutStreamer->emitInstruction(
639         MCInstBuilder(AArch64::Bcc)
640             .addImm(AArch64CC::NE)
641             .addExpr(MCSymbolRefExpr::create(HandleMismatchOrPartialSym,
642                                              OutContext)),
643         *STI);
644     MCSymbol *ReturnSym = OutContext.createTempSymbol();
645     OutStreamer->emitLabel(ReturnSym);
646     OutStreamer->emitInstruction(
647         MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
648     OutStreamer->emitLabel(HandleMismatchOrPartialSym);
649 
650     if (HasMatchAllTag) {
651       OutStreamer->emitInstruction(MCInstBuilder(AArch64::UBFMXri)
652                                        .addReg(AArch64::X17)
653                                        .addReg(Reg)
654                                        .addImm(56)
655                                        .addImm(63),
656                                    *STI);
657       OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSXri)
658                                        .addReg(AArch64::XZR)
659                                        .addReg(AArch64::X17)
660                                        .addImm(MatchAllTag)
661                                        .addImm(0),
662                                    *STI);
663       OutStreamer->emitInstruction(
664           MCInstBuilder(AArch64::Bcc)
665               .addImm(AArch64CC::EQ)
666               .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
667           *STI);
668     }
669 
670     if (IsShort) {
671       OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWri)
672                                        .addReg(AArch64::WZR)
673                                        .addReg(AArch64::W16)
674                                        .addImm(15)
675                                        .addImm(0),
676                                    *STI);
677       MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
678       OutStreamer->emitInstruction(
679           MCInstBuilder(AArch64::Bcc)
680               .addImm(AArch64CC::HI)
681               .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
682           *STI);
683 
684       OutStreamer->emitInstruction(
685           MCInstBuilder(AArch64::ANDXri)
686               .addReg(AArch64::X17)
687               .addReg(Reg)
688               .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
689           *STI);
690       if (Size != 1)
691         OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
692                                          .addReg(AArch64::X17)
693                                          .addReg(AArch64::X17)
694                                          .addImm(Size - 1)
695                                          .addImm(0),
696                                      *STI);
697       OutStreamer->emitInstruction(MCInstBuilder(AArch64::SUBSWrs)
698                                        .addReg(AArch64::WZR)
699                                        .addReg(AArch64::W16)
700                                        .addReg(AArch64::W17)
701                                        .addImm(0),
702                                    *STI);
703       OutStreamer->emitInstruction(
704           MCInstBuilder(AArch64::Bcc)
705               .addImm(AArch64CC::LS)
706               .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
707           *STI);
708 
709       OutStreamer->emitInstruction(
710           MCInstBuilder(AArch64::ORRXri)
711               .addReg(AArch64::X16)
712               .addReg(Reg)
713               .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
714           *STI);
715       OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRBBui)
716                                        .addReg(AArch64::W16)
717                                        .addReg(AArch64::X16)
718                                        .addImm(0),
719                                    *STI);
720       OutStreamer->emitInstruction(
721           MCInstBuilder(AArch64::SUBSXrs)
722               .addReg(AArch64::XZR)
723               .addReg(AArch64::X16)
724               .addReg(Reg)
725               .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
726           *STI);
727       OutStreamer->emitInstruction(
728           MCInstBuilder(AArch64::Bcc)
729               .addImm(AArch64CC::EQ)
730               .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
731           *STI);
732 
733       OutStreamer->emitLabel(HandleMismatchSym);
734     }
735 
736     OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
737                                      .addReg(AArch64::SP)
738                                      .addReg(AArch64::X0)
739                                      .addReg(AArch64::X1)
740                                      .addReg(AArch64::SP)
741                                      .addImm(-32),
742                                  *STI);
743     OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXi)
744                                      .addReg(AArch64::FP)
745                                      .addReg(AArch64::LR)
746                                      .addReg(AArch64::SP)
747                                      .addImm(29),
748                                  *STI);
749 
750     if (Reg != AArch64::X0)
751       OutStreamer->emitInstruction(MCInstBuilder(AArch64::ORRXrs)
752                                        .addReg(AArch64::X0)
753                                        .addReg(AArch64::XZR)
754                                        .addReg(Reg)
755                                        .addImm(0),
756                                    *STI);
757     OutStreamer->emitInstruction(
758         MCInstBuilder(AArch64::MOVZXi)
759             .addReg(AArch64::X1)
760             .addImm(AccessInfo & HWASanAccessInfo::RuntimeMask)
761             .addImm(0),
762         *STI);
763 
764     if (CompileKernel) {
765       // The Linux kernel's dynamic loader doesn't support GOT relative
766       // relocations, but it doesn't support late binding either, so just call
767       // the function directly.
768       OutStreamer->emitInstruction(
769           MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef), *STI);
770     } else {
771       // Intentionally load the GOT entry and branch to it, rather than possibly
772       // late binding the function, which may clobber the registers before we
773       // have a chance to save them.
774       OutStreamer->emitInstruction(
775           MCInstBuilder(AArch64::ADRP)
776               .addReg(AArch64::X16)
777               .addExpr(AArch64MCExpr::create(
778                   HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_PAGE,
779                   OutContext)),
780           *STI);
781       OutStreamer->emitInstruction(
782           MCInstBuilder(AArch64::LDRXui)
783               .addReg(AArch64::X16)
784               .addReg(AArch64::X16)
785               .addExpr(AArch64MCExpr::create(
786                   HwasanTagMismatchRef, AArch64MCExpr::VariantKind::VK_GOT_LO12,
787                   OutContext)),
788           *STI);
789       OutStreamer->emitInstruction(
790           MCInstBuilder(AArch64::BR).addReg(AArch64::X16), *STI);
791     }
792   }
793 }
794 
795 void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
796   emitHwasanMemaccessSymbols(M);
797 
798   const Triple &TT = TM.getTargetTriple();
799   if (TT.isOSBinFormatMachO()) {
800     // Funny Darwin hack: This flag tells the linker that no global symbols
801     // contain code that falls through to other global symbols (e.g. the obvious
802     // implementation of multiple entry points).  If this doesn't occur, the
803     // linker can safely perform dead code stripping.  Since LLVM never
804     // generates code that does this, it is always safe to set.
805     OutStreamer->emitAssemblerFlag(MCAF_SubsectionsViaSymbols);
806   }
807 
808   // Emit stack and fault map information.
809   FM.serializeToFaultMapSection();
810 
811 }
812 
813 void AArch64AsmPrinter::emitLOHs() {
814   SmallVector<MCSymbol *, 3> MCArgs;
815 
816   for (const auto &D : AArch64FI->getLOHContainer()) {
817     for (const MachineInstr *MI : D.getArgs()) {
818       MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
819       assert(LabelIt != LOHInstToLabel.end() &&
820              "Label hasn't been inserted for LOH related instruction");
821       MCArgs.push_back(LabelIt->second);
822     }
823     OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
824     MCArgs.clear();
825   }
826 }
827 
828 void AArch64AsmPrinter::emitFunctionBodyEnd() {
829   if (!AArch64FI->getLOHRelated().empty())
830     emitLOHs();
831 }
832 
833 /// GetCPISymbol - Return the symbol for the specified constant pool entry.
834 MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
835   // Darwin uses a linker-private symbol name for constant-pools (to
836   // avoid addends on the relocation?); ELF has no such concept and
837   // uses a normal private symbol.
838   if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
839     return OutContext.getOrCreateSymbol(
840         Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
841         Twine(getFunctionNumber()) + "_" + Twine(CPID));
842 
843   return AsmPrinter::GetCPISymbol(CPID);
844 }
845 
846 void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
847                                      raw_ostream &O) {
848   const MachineOperand &MO = MI->getOperand(OpNum);
849   switch (MO.getType()) {
850   default:
851     llvm_unreachable("<unknown operand type>");
852   case MachineOperand::MO_Register: {
853     Register Reg = MO.getReg();
854     assert(Reg.isPhysical());
855     assert(!MO.getSubReg() && "Subregs should be eliminated!");
856     O << AArch64InstPrinter::getRegisterName(Reg);
857     break;
858   }
859   case MachineOperand::MO_Immediate: {
860     O << MO.getImm();
861     break;
862   }
863   case MachineOperand::MO_GlobalAddress: {
864     PrintSymbolOperand(MO, O);
865     break;
866   }
867   case MachineOperand::MO_BlockAddress: {
868     MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
869     Sym->print(O, MAI);
870     break;
871   }
872   }
873 }
874 
875 bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
876                                           raw_ostream &O) {
877   Register Reg = MO.getReg();
878   switch (Mode) {
879   default:
880     return true; // Unknown mode.
881   case 'w':
882     Reg = getWRegFromXReg(Reg);
883     break;
884   case 'x':
885     Reg = getXRegFromWReg(Reg);
886     break;
887   case 't':
888     Reg = getXRegFromXRegTuple(Reg);
889     break;
890   }
891 
892   O << AArch64InstPrinter::getRegisterName(Reg);
893   return false;
894 }
895 
896 // Prints the register in MO as the register of class RC with the same
897 // encoding (hardware register index). This should not be used for cross-class
898 // printing.
899 bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
900                                            const TargetRegisterClass *RC,
901                                            unsigned AltName, raw_ostream &O) {
902   assert(MO.isReg() && "Should only get here with a register!");
903   const TargetRegisterInfo *RI = STI->getRegisterInfo();
904   Register Reg = MO.getReg();
905   unsigned RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
906   if (!RI->regsOverlap(RegToPrint, Reg))
907     return true;
908   O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
909   return false;
910 }
911 
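// Handles GCC inline-asm operand modifiers. Illustrative examples (assumed
// typical uses, not exhaustive): "%w0" prints the 32-bit GPR name (e.g. w8),
// "%x0" the 64-bit name, "%b0"/"%h0"/"%s0"/"%d0"/"%q0" the FP/SIMD views, and
// "%z0" the SVE Z register. Without a modifier, GPRs print as x registers and
// FP/SIMD registers as v registers.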
912 bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
913                                         const char *ExtraCode, raw_ostream &O) {
914   const MachineOperand &MO = MI->getOperand(OpNum);
915 
916   // First try the generic code, which knows about modifiers like 'c' and 'n'.
917   if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
918     return false;
919 
920   // Does this asm operand have a single letter operand modifier?
921   if (ExtraCode && ExtraCode[0]) {
922     if (ExtraCode[1] != 0)
923       return true; // Unknown modifier.
924 
925     switch (ExtraCode[0]) {
926     default:
927       return true; // Unknown modifier.
928     case 'w':      // Print W register
929     case 'x':      // Print X register
930       if (MO.isReg())
931         return printAsmMRegister(MO, ExtraCode[0], O);
932       if (MO.isImm() && MO.getImm() == 0) {
933         unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
934         O << AArch64InstPrinter::getRegisterName(Reg);
935         return false;
936       }
937       printOperand(MI, OpNum, O);
938       return false;
939     case 'b': // Print B register.
940     case 'h': // Print H register.
941     case 's': // Print S register.
942     case 'd': // Print D register.
943     case 'q': // Print Q register.
944     case 'z': // Print Z register.
945       if (MO.isReg()) {
946         const TargetRegisterClass *RC;
947         switch (ExtraCode[0]) {
948         case 'b':
949           RC = &AArch64::FPR8RegClass;
950           break;
951         case 'h':
952           RC = &AArch64::FPR16RegClass;
953           break;
954         case 's':
955           RC = &AArch64::FPR32RegClass;
956           break;
957         case 'd':
958           RC = &AArch64::FPR64RegClass;
959           break;
960         case 'q':
961           RC = &AArch64::FPR128RegClass;
962           break;
963         case 'z':
964           RC = &AArch64::ZPRRegClass;
965           break;
966         default:
967           return true;
968         }
969         return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
970       }
971       printOperand(MI, OpNum, O);
972       return false;
973     }
974   }
975 
976   // According to ARM, we should emit x and v registers unless we have a
977   // modifier.
978   if (MO.isReg()) {
979     Register Reg = MO.getReg();
980 
981     // If this is a w or x register, print an x register.
982     if (AArch64::GPR32allRegClass.contains(Reg) ||
983         AArch64::GPR64allRegClass.contains(Reg))
984       return printAsmMRegister(MO, 'x', O);
985 
986     // If this is an x register tuple, print an x register.
987     if (AArch64::GPR64x8ClassRegClass.contains(Reg))
988       return printAsmMRegister(MO, 't', O);
989 
990     unsigned AltName = AArch64::NoRegAltName;
991     const TargetRegisterClass *RegClass;
992     if (AArch64::ZPRRegClass.contains(Reg)) {
993       RegClass = &AArch64::ZPRRegClass;
994     } else if (AArch64::PPRRegClass.contains(Reg)) {
995       RegClass = &AArch64::PPRRegClass;
996     } else if (AArch64::PNRRegClass.contains(Reg)) {
997       RegClass = &AArch64::PNRRegClass;
998     } else {
999       RegClass = &AArch64::FPR128RegClass;
1000       AltName = AArch64::vreg;
1001     }
1002 
1003     // If this is a b, h, s, d, or q register, print it as a v register.
1004     return printAsmRegInClass(MO, RegClass, AltName, O);
1005   }
1006 
1007   printOperand(MI, OpNum, O);
1008   return false;
1009 }
1010 
1011 bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1012                                               unsigned OpNum,
1013                                               const char *ExtraCode,
1014                                               raw_ostream &O) {
1015   if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1016     return true; // Unknown modifier.
1017 
1018   const MachineOperand &MO = MI->getOperand(OpNum);
1019   assert(MO.isReg() && "unexpected inline asm memory operand");
1020   O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1021   return false;
1022 }
1023 
1024 void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1025                                                raw_ostream &OS) {
1026   unsigned NOps = MI->getNumOperands();
1027   assert(NOps == 4);
1028   OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1029   // cast away const; the DI* classes do not take const operands for some reason.
1030   OS << MI->getDebugVariable()->getName();
1031   OS << " <- ";
1032   // Frame address.  Currently handles register +- offset only.
1033   assert(MI->isIndirectDebugValue());
1034   OS << '[';
1035   for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
1036                                          MI->debug_operands().end());
1037        I < E; ++I) {
1038     if (I != 0)
1039       OS << ", ";
1040     printOperand(MI, I, OS);
1041   }
1042   OS << ']';
1043   OS << "+";
1044   printOperand(MI, NOps - 2, OS);
1045 }
1046 
1047 void AArch64AsmPrinter::emitJumpTableInfo() {
1048   const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1049   if (!MJTI) return;
1050 
1051   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1052   if (JT.empty()) return;
1053 
1054   const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1055   MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(MF->getFunction(), TM);
1056   OutStreamer->switchSection(ReadOnlySec);
1057 
1058   auto AFI = MF->getInfo<AArch64FunctionInfo>();
1059   for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
1060     const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1061 
1062     // If this jump table was deleted, ignore it.
1063     if (JTBBs.empty()) continue;
1064 
1065     unsigned Size = AFI->getJumpTableEntrySize(JTI);
1066     emitAlignment(Align(Size));
1067     OutStreamer->emitLabel(GetJTISymbol(JTI));
1068 
1069     const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1070     const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1071 
1072     for (auto *JTBB : JTBBs) {
1073       const MCExpr *Value =
1074           MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1075 
1076       // Each entry is:
1077       //     .byte/.hword (LBB - Lbase)>>2
1078       // or plain:
1079       //     .word LBB - Lbase
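      // e.g. with 1-byte entries, a block 40 bytes past Lbase is emitted as
      // ".byte 10" (illustrative).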
1080       Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1081       if (Size != 4)
1082         Value = MCBinaryExpr::createLShr(
1083             Value, MCConstantExpr::create(2, OutContext), OutContext);
1084 
1085       OutStreamer->emitValue(Value, Size);
1086     }
1087   }
1088 }
1089 
1090 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1091            codeview::JumpTableEntrySize>
1092 AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1093                                             const MachineInstr *BranchInstr,
1094                                             const MCSymbol *BranchLabel) const {
1095   const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1096   const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1097   codeview::JumpTableEntrySize EntrySize;
1098   switch (AFI->getJumpTableEntrySize(JTI)) {
1099   case 1:
1100     EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1101     break;
1102   case 2:
1103     EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1104     break;
1105   case 4:
1106     EntrySize = codeview::JumpTableEntrySize::Int32;
1107     break;
1108   default:
1109     llvm_unreachable("Unexpected jump table entry size");
1110   }
1111   return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1112 }
1113 
1114 void AArch64AsmPrinter::emitFunctionEntryLabel() {
1115   if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1116       MF->getFunction().getCallingConv() ==
1117           CallingConv::AArch64_SVE_VectorCall ||
1118       MF->getInfo<AArch64FunctionInfo>()->isSVECC()) {
1119     auto *TS =
1120         static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1121     TS->emitDirectiveVariantPCS(CurrentFnSym);
1122   }
1123 
1124   if (TM.getTargetTriple().isWindowsArm64EC()) {
1125     // For ARM64EC targets, a function definition's name is mangled differently
1126     // from the normal symbol. We emit the alias from the unmangled symbol to
1127     // the mangled symbol name here.
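    // (For example, a native definition of "foo" is typically emitted under an
    // EC-mangled name such as "#foo", with "foo" emitted as a weak
    // anti-dependency alias to it; example names illustrative.)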
1128     if (MDNode *Unmangled =
1129             MF->getFunction().getMetadata("arm64ec_unmangled_name")) {
1130       AsmPrinter::emitFunctionEntryLabel();
1131 
1132       if (MDNode *ECMangled =
1133               MF->getFunction().getMetadata("arm64ec_ecmangled_name")) {
1134         StringRef UnmangledStr =
1135             cast<MDString>(Unmangled->getOperand(0))->getString();
1136         MCSymbol *UnmangledSym =
1137             MMI->getContext().getOrCreateSymbol(UnmangledStr);
1138         StringRef ECMangledStr =
1139             cast<MDString>(ECMangled->getOperand(0))->getString();
1140         MCSymbol *ECMangledSym =
1141             MMI->getContext().getOrCreateSymbol(ECMangledStr);
1142         OutStreamer->emitSymbolAttribute(UnmangledSym, MCSA_WeakAntiDep);
1143         OutStreamer->emitAssignment(
1144             UnmangledSym,
1145             MCSymbolRefExpr::create(ECMangledSym, MCSymbolRefExpr::VK_WEAKREF,
1146                                     MMI->getContext()));
1147         OutStreamer->emitSymbolAttribute(ECMangledSym, MCSA_WeakAntiDep);
1148         OutStreamer->emitAssignment(
1149             ECMangledSym,
1150             MCSymbolRefExpr::create(CurrentFnSym, MCSymbolRefExpr::VK_WEAKREF,
1151                                     MMI->getContext()));
1152         return;
1153       } else {
1154         StringRef UnmangledStr =
1155             cast<MDString>(Unmangled->getOperand(0))->getString();
1156         MCSymbol *UnmangledSym =
1157             MMI->getContext().getOrCreateSymbol(UnmangledStr);
1158         OutStreamer->emitSymbolAttribute(UnmangledSym, MCSA_WeakAntiDep);
1159         OutStreamer->emitAssignment(
1160             UnmangledSym,
1161             MCSymbolRefExpr::create(CurrentFnSym, MCSymbolRefExpr::VK_WEAKREF,
1162                                     MMI->getContext()));
1163         return;
1164       }
1165     }
1166   }
1167 
1168   return AsmPrinter::emitFunctionEntryLabel();
1169 }
1170 
1171 /// Small jump tables contain an unsigned byte or half, representing the offset
1172 /// from the lowest-addressed possible destination to the desired basic
1173 /// block. Since all instructions are 4-byte aligned, this is further compressed
1174 /// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1175 /// materialize the correct destination we need:
1176 ///
1177 ///             adr xDest, .LBB0_0
1178 ///             ldrb wScratch, [xTable, xEntry]   (with "lsl #1" for ldrh).
1179 ///             add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
1180 void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1181                                            const llvm::MachineInstr &MI) {
1182   Register DestReg = MI.getOperand(0).getReg();
1183   Register ScratchReg = MI.getOperand(1).getReg();
1184   Register ScratchRegW =
1185       STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1186   Register TableReg = MI.getOperand(2).getReg();
1187   Register EntryReg = MI.getOperand(3).getReg();
1188   int JTIdx = MI.getOperand(4).getIndex();
1189   int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1190 
1191   // This has to be first because the compression pass bases its reachability
1192   // calculations on the start of the JumpTableDest instruction.
1193   auto Label =
1194       MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1195 
1196   // If we don't already have a symbol to use as the base, use the ADR
1197   // instruction itself.
1198   if (!Label) {
1199     Label = MF->getContext().createTempSymbol();
1200     AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1201     OutStreamer.emitLabel(Label);
1202   }
1203 
1204   auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1205   EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1206                                   .addReg(DestReg)
1207                                   .addExpr(LabelExpr));
1208 
1209   // Load the number of instruction-steps to offset from the label.
1210   unsigned LdrOpcode;
1211   switch (Size) {
1212   case 1: LdrOpcode = AArch64::LDRBBroX; break;
1213   case 2: LdrOpcode = AArch64::LDRHHroX; break;
1214   case 4: LdrOpcode = AArch64::LDRSWroX; break;
1215   default:
1216     llvm_unreachable("Unknown jump table size");
1217   }
1218 
1219   EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1220                                   .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1221                                   .addReg(TableReg)
1222                                   .addReg(EntryReg)
1223                                   .addImm(0)
1224                                   .addImm(Size == 1 ? 0 : 1));
1225 
1226   // Add to the already materialized base label address, multiplying by 4 if
1227   // compressed.
1228   EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1229                                   .addReg(DestReg)
1230                                   .addReg(DestReg)
1231                                   .addReg(ScratchReg)
1232                                   .addImm(Size == 4 ? 0 : 2));
1233 }
1234 
1235 void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1236                                   const llvm::MachineInstr &MI) {
1237   unsigned Opcode = MI.getOpcode();
1238   assert(STI->hasMOPS());
1239   assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1240 
1241   const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1242     if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1243       return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1244     if (Opcode == AArch64::MOPSMemoryMovePseudo)
1245       return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1246     if (Opcode == AArch64::MOPSMemorySetPseudo)
1247       return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1248     if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1249       return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1250     llvm_unreachable("Unhandled memory operation pseudo");
1251   }();
1252   const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1253                      Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1254 
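  // Illustrative expansion for the memcpy pseudo (CPYFP/CPYFM/CPYFE), all
  // three taking the same destination, source and size registers:
  //   cpyfp [x0]!, [x1]!, x2!
  //   cpyfm [x0]!, [x1]!, x2!
  //   cpyfe [x0]!, [x1]!, x2!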
1255   for (auto Op : Ops) {
1256     int i = 0;
1257     auto MCIB = MCInstBuilder(Op);
1258     // Destination registers
1259     MCIB.addReg(MI.getOperand(i++).getReg());
1260     MCIB.addReg(MI.getOperand(i++).getReg());
1261     if (!IsSet)
1262       MCIB.addReg(MI.getOperand(i++).getReg());
1263     // Input registers
1264     MCIB.addReg(MI.getOperand(i++).getReg());
1265     MCIB.addReg(MI.getOperand(i++).getReg());
1266     MCIB.addReg(MI.getOperand(i++).getReg());
1267 
1268     EmitToStreamer(OutStreamer, MCIB);
1269   }
1270 }
1271 
1272 void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1273                                       const MachineInstr &MI) {
1274   unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1275 
1276   auto &Ctx = OutStreamer.getContext();
1277   MCSymbol *MILabel = Ctx.createTempSymbol();
1278   OutStreamer.emitLabel(MILabel);
1279 
1280   SM.recordStackMap(*MILabel, MI);
1281   assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1282 
1283   // Scan ahead to trim the shadow.
1284   const MachineBasicBlock &MBB = *MI.getParent();
1285   MachineBasicBlock::const_iterator MII(MI);
1286   ++MII;
1287   while (NumNOPBytes > 0) {
1288     if (MII == MBB.end() || MII->isCall() ||
1289         MII->getOpcode() == AArch64::DBG_VALUE ||
1290         MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1291         MII->getOpcode() == TargetOpcode::STACKMAP)
1292       break;
1293     ++MII;
1294     NumNOPBytes -= 4;
1295   }
1296 
1297   // Emit nops.
1298   for (unsigned i = 0; i < NumNOPBytes; i += 4)
1299     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1300 }
1301 
1302 // Lower a patchpoint of the form:
1303 // [<def>], <id>, <numBytes>, <target>, <numArgs>
1304 void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1305                                         const MachineInstr &MI) {
1306   auto &Ctx = OutStreamer.getContext();
1307   MCSymbol *MILabel = Ctx.createTempSymbol();
1308   OutStreamer.emitLabel(MILabel);
1309   SM.recordPatchPoint(*MILabel, MI);
1310 
1311   PatchPointOpers Opers(&MI);
1312 
1313   int64_t CallTarget = Opers.getCallTarget().getImm();
1314   unsigned EncodedBytes = 0;
1315   if (CallTarget) {
1316     assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1317            "High 16 bits of call target should be zero.");
1318     Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1319     EncodedBytes = 16;
1320     // Materialize the jump address:
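    // For example (illustrative), CallTarget = 0x123456789abc with scratch x16
    // expands to:
    //   movz x16, #0x1234, lsl #32
    //   movk x16, #0x5678, lsl #16
    //   movk x16, #0x9abc
    //   blr  x16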
1321     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVZXi)
1322                                     .addReg(ScratchReg)
1323                                     .addImm((CallTarget >> 32) & 0xFFFF)
1324                                     .addImm(32));
1325     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
1326                                     .addReg(ScratchReg)
1327                                     .addReg(ScratchReg)
1328                                     .addImm((CallTarget >> 16) & 0xFFFF)
1329                                     .addImm(16));
1330     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::MOVKXi)
1331                                     .addReg(ScratchReg)
1332                                     .addReg(ScratchReg)
1333                                     .addImm(CallTarget & 0xFFFF)
1334                                     .addImm(0));
1335     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1336   }
1337   // Emit padding.
1338   unsigned NumBytes = Opers.getNumPatchBytes();
1339   assert(NumBytes >= EncodedBytes &&
1340          "Patchpoint can't request size less than the length of a call.");
1341   assert((NumBytes - EncodedBytes) % 4 == 0 &&
1342          "Invalid number of NOP bytes requested!");
1343   for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1344     EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1345 }
1346 
1347 void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1348                                         const MachineInstr &MI) {
1349   StatepointOpers SOpers(&MI);
1350   if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1351     assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1352     for (unsigned i = 0; i < PatchBytes; i += 4)
1353       EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1354   } else {
1355     // Lower call target and choose correct opcode
1356     const MachineOperand &CallTarget = SOpers.getCallTarget();
1357     MCOperand CallTargetMCOp;
1358     unsigned CallOpcode;
1359     switch (CallTarget.getType()) {
1360     case MachineOperand::MO_GlobalAddress:
1361     case MachineOperand::MO_ExternalSymbol:
1362       MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1363       CallOpcode = AArch64::BL;
1364       break;
1365     case MachineOperand::MO_Immediate:
1366       CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1367       CallOpcode = AArch64::BL;
1368       break;
1369     case MachineOperand::MO_Register:
1370       CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1371       CallOpcode = AArch64::BLR;
1372       break;
1373     default:
1374       llvm_unreachable("Unsupported operand type in statepoint call target");
1375       break;
1376     }
1377 
1378     EmitToStreamer(OutStreamer,
1379                    MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1380   }
1381 
1382   auto &Ctx = OutStreamer.getContext();
1383   MCSymbol *MILabel = Ctx.createTempSymbol();
1384   OutStreamer.emitLabel(MILabel);
1385   SM.recordStatepoint(*MILabel, MI);
1386 }
1387 
1388 void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1389   // FAULTING_OP <def>, <faulting type>, <MBB handler>,
1390   //             <opcode>, <operands>
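         //
         // The wrapped instruction is emitted unchanged; the label placed on it and
         // the handler block's label are recorded in the fault map, so a trap at
         // that PC (e.g. an implicit null-check load) can be redirected to the
         // handler.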
1391 
1392   Register DefRegister = FaultingMI.getOperand(0).getReg();
1393   FaultMaps::FaultKind FK =
1394       static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1395   MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1396   unsigned Opcode = FaultingMI.getOperand(3).getImm();
1397   unsigned OperandsBeginIdx = 4;
1398 
1399   auto &Ctx = OutStreamer->getContext();
1400   MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1401   OutStreamer->emitLabel(FaultingLabel);
1402 
1403   assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1404   FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1405 
1406   MCInst MI;
1407   MI.setOpcode(Opcode);
1408 
1409   if (DefRegister != (Register)0)
1410     MI.addOperand(MCOperand::createReg(DefRegister));
1411 
1412   for (const MachineOperand &MO :
1413        llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1414     MCOperand Dest;
1415     lowerOperand(MO, Dest);
1416     MI.addOperand(Dest);
1417   }
1418 
1419   OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1420   OutStreamer->emitInstruction(MI, getSubtargetInfo());
1421 }
1422 
1423 void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1424   Register DestReg = MI.getOperand(0).getReg();
1425   if (STI->hasZeroCycleZeroingFP() && !STI->hasZeroCycleZeroingFPWorkaround() &&
1426       STI->isNeonAvailable()) {
1427     // Convert H/S register to corresponding D register
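           // (For example, h3 and s3 alias the low 16/32 bits of d3, so
           // "movi d3, #0" zeroes them as well; MOVID takes a D-register operand.)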
1428     if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
1429       DestReg = AArch64::D0 + (DestReg - AArch64::H0);
1430     else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
1431       DestReg = AArch64::D0 + (DestReg - AArch64::S0);
1432     else
1433       assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
1434 
1435     MCInst MOVI;
1436     MOVI.setOpcode(AArch64::MOVID);
1437     MOVI.addOperand(MCOperand::createReg(DestReg));
1438     MOVI.addOperand(MCOperand::createImm(0));
1439     EmitToStreamer(*OutStreamer, MOVI);
1440   } else {
1441     MCInst FMov;
1442     switch (MI.getOpcode()) {
1443     default: llvm_unreachable("Unexpected opcode");
1444     case AArch64::FMOVH0:
1445       FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1446       if (!STI->hasFullFP16())
1447         DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1448       FMov.addOperand(MCOperand::createReg(DestReg));
1449       FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1450       break;
1451     case AArch64::FMOVS0:
1452       FMov.setOpcode(AArch64::FMOVWSr);
1453       FMov.addOperand(MCOperand::createReg(DestReg));
1454       FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1455       break;
1456     case AArch64::FMOVD0:
1457       FMov.setOpcode(AArch64::FMOVXDr);
1458       FMov.addOperand(MCOperand::createReg(DestReg));
1459       FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1460       break;
1461     }
1462     EmitToStreamer(*OutStreamer, FMov);
1463   }
1464 }
1465 
1466 // Simple pseudo-instructions have their lowering (with expansion to real
1467 // instructions) auto-generated.
1468 #include "AArch64GenMCPseudoLowering.inc"
1469 
1470 void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
1471   AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
1472 
1473   // Do any auto-generated pseudo lowerings.
1474   if (emitPseudoExpansionLowering(*OutStreamer, MI))
1475     return;
1476 
1477   if (MI->getOpcode() == AArch64::ADRP) {
1478     for (auto &Opd : MI->operands()) {
1479       if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
1480                                 "swift_async_extendedFramePointerFlags") {
1481         ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
1482       }
1483     }
1484   }
1485 
1486   if (AArch64FI->getLOHRelated().count(MI)) {
1487     // Generate a label for the LOH-related instruction
1488     MCSymbol *LOHLabel = createTempSymbol("loh");
1489     // Associate the instruction with the label
1490     LOHInstToLabel[MI] = LOHLabel;
1491     OutStreamer->emitLabel(LOHLabel);
1492   }
1493 
1494   AArch64TargetStreamer *TS =
1495     static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1496   // Do any manual lowerings.
1497   switch (MI->getOpcode()) {
1498   default:
1499     break;
1500   case AArch64::HINT: {
1501     // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
1502     // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
1503     // non-empty. If MI is the initial BTI, place the
1504     // __patchable_function_entries label after BTI.
1505     if (CurrentPatchableFunctionEntrySym &&
1506         CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
1507         MI == &MF->front().front()) {
1508       int64_t Imm = MI->getOperand(0).getImm();
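             // HINT #32 is a plain BTI; #34, #36 and #38 encode BTI c, BTI j and
             // BTI jc. The test below matches only the operand-carrying BTI forms.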
1509       if ((Imm & 32) && (Imm & 6)) {
1510         MCInst Inst;
1511         MCInstLowering.Lower(MI, Inst);
1512         EmitToStreamer(*OutStreamer, Inst);
1513         CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
1514         OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
1515         return;
1516       }
1517     }
1518     break;
1519   }
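         // MOVMCSym materializes a 32-bit absolute symbol address with a movz/movk
         // pair: roughly, movz writes bits [31:16] via an :abs_g1_s: fixup and movk
         // fills bits [15:0] via :abs_g0_nc:, matching the operands built below.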
1520   case AArch64::MOVMCSym: {
1521     Register DestReg = MI->getOperand(0).getReg();
1522     const MachineOperand &MO_Sym = MI->getOperand(1);
1523     MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
1524     MCOperand Hi_MCSym, Lo_MCSym;
1525 
1526     Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
1527     Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
1528 
1529     MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
1530     MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
1531 
1532     MCInst MovZ;
1533     MovZ.setOpcode(AArch64::MOVZXi);
1534     MovZ.addOperand(MCOperand::createReg(DestReg));
1535     MovZ.addOperand(Hi_MCSym);
1536     MovZ.addOperand(MCOperand::createImm(16));
1537     EmitToStreamer(*OutStreamer, MovZ);
1538 
1539     MCInst MovK;
1540     MovK.setOpcode(AArch64::MOVKXi);
1541     MovK.addOperand(MCOperand::createReg(DestReg));
1542     MovK.addOperand(MCOperand::createReg(DestReg));
1543     MovK.addOperand(Lo_MCSym);
1544     MovK.addOperand(MCOperand::createImm(0));
1545     EmitToStreamer(*OutStreamer, MovK);
1546     return;
1547   }
1548   case AArch64::MOVIv2d_ns:
1549     // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0",
1550     // as movi is more efficient across all cores. Newer cores can eliminate
1551     // fmovs early, so there is no difference with movi, but this is not true
1552     // for all implementations.
1553     //
1554     // The floating-point form doesn't quite work in rare cases on older CPUs,
1555     // so on those targets we lower this instruction to movi.16b instead.
1556     if (STI->hasZeroCycleZeroingFPWorkaround() &&
1557         MI->getOperand(1).getImm() == 0) {
1558       MCInst TmpInst;
1559       TmpInst.setOpcode(AArch64::MOVIv16b_ns);
1560       TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
1561       TmpInst.addOperand(MCOperand::createImm(MI->getOperand(1).getImm()));
1562       EmitToStreamer(*OutStreamer, TmpInst);
1563       return;
1564     }
1565     break;
1566 
1567   case AArch64::DBG_VALUE:
1568   case AArch64::DBG_VALUE_LIST:
1569     if (isVerbose() && OutStreamer->hasRawTextSupport()) {
1570       SmallString<128> TmpStr;
1571       raw_svector_ostream OS(TmpStr);
1572       PrintDebugValueComment(MI, OS);
1573       OutStreamer->emitRawText(StringRef(OS.str()));
1574     }
1575     return;
1576 
1577   case AArch64::EMITBKEY: {
1578     ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
1579     if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
1580         ExceptionHandlingType != ExceptionHandling::ARM)
1581       return;
1582 
1583     if (getFunctionCFISectionType(*MF) == CFISection::None)
1584       return;
1585 
1586     OutStreamer->emitCFIBKeyFrame();
1587     return;
1588   }
1589 
1590   case AArch64::EMITMTETAGGED: {
1591     ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
1592     if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
1593         ExceptionHandlingType != ExceptionHandling::ARM)
1594       return;
1595 
1596     if (getFunctionCFISectionType(*MF) != CFISection::None)
1597       OutStreamer->emitCFIMTETaggedFrame();
1598     return;
1599   }
1600 
1601   // Tail calls use pseudo instructions so they have the proper code-gen
1602   // attributes (isCall, isReturn, etc.). We lower them to the real
1603   // instruction here.
1604   case AArch64::TCRETURNri:
1605   case AArch64::TCRETURNriBTI:
1606   case AArch64::TCRETURNriALL: {
1607     MCInst TmpInst;
1608     TmpInst.setOpcode(AArch64::BR);
1609     TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
1610     EmitToStreamer(*OutStreamer, TmpInst);
1611     return;
1612   }
1613   case AArch64::TCRETURNdi: {
1614     MCOperand Dest;
1615     MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
1616     MCInst TmpInst;
1617     TmpInst.setOpcode(AArch64::B);
1618     TmpInst.addOperand(Dest);
1619     EmitToStreamer(*OutStreamer, TmpInst);
1620     return;
1621   }
1622   case AArch64::SpeculationBarrierISBDSBEndBB: {
1623     // Print DSB SY + ISB (0xf is the SY barrier option)
1624     MCInst TmpInstDSB;
1625     TmpInstDSB.setOpcode(AArch64::DSB);
1626     TmpInstDSB.addOperand(MCOperand::createImm(0xf));
1627     EmitToStreamer(*OutStreamer, TmpInstDSB);
1628     MCInst TmpInstISB;
1629     TmpInstISB.setOpcode(AArch64::ISB);
1630     TmpInstISB.addOperand(MCOperand::createImm(0xf));
1631     EmitToStreamer(*OutStreamer, TmpInstISB);
1632     return;
1633   }
1634   case AArch64::SpeculationBarrierSBEndBB: {
1635     // Print SB
1636     MCInst TmpInstSB;
1637     TmpInstSB.setOpcode(AArch64::SB);
1638     EmitToStreamer(*OutStreamer, TmpInstSB);
1639     return;
1640   }
1641   case AArch64::TLSDESC_CALLSEQ: {
1642     /// lower this to:
1643     ///    adrp  x0, :tlsdesc:var
1644     ///    ldr   x1, [x0, #:tlsdesc_lo12:var]
1645     ///    add   x0, x0, #:tlsdesc_lo12:var
1646     ///    .tlsdesccall var
1647     ///    blr   x1
1648     ///    (TPIDR_EL0 offset now in x0)
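           /// The call returns the variable's offset from the thread pointer in x0;
           /// the caller is then expected to read TPIDR_EL0 (mrs) and add it to x0
           /// to form the final address.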
1649     const MachineOperand &MO_Sym = MI->getOperand(0);
1650     MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
1651     MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
1652     MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
1653     MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
1654     MCInstLowering.lowerOperand(MO_Sym, Sym);
1655     MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
1656     MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
1657 
1658     MCInst Adrp;
1659     Adrp.setOpcode(AArch64::ADRP);
1660     Adrp.addOperand(MCOperand::createReg(AArch64::X0));
1661     Adrp.addOperand(SymTLSDesc);
1662     EmitToStreamer(*OutStreamer, Adrp);
1663 
1664     MCInst Ldr;
1665     if (STI->isTargetILP32()) {
1666       Ldr.setOpcode(AArch64::LDRWui);
1667       Ldr.addOperand(MCOperand::createReg(AArch64::W1));
1668     } else {
1669       Ldr.setOpcode(AArch64::LDRXui);
1670       Ldr.addOperand(MCOperand::createReg(AArch64::X1));
1671     }
1672     Ldr.addOperand(MCOperand::createReg(AArch64::X0));
1673     Ldr.addOperand(SymTLSDescLo12);
1674     Ldr.addOperand(MCOperand::createImm(0));
1675     EmitToStreamer(*OutStreamer, Ldr);
1676 
1677     MCInst Add;
1678     if (STI->isTargetILP32()) {
1679       Add.setOpcode(AArch64::ADDWri);
1680       Add.addOperand(MCOperand::createReg(AArch64::W0));
1681       Add.addOperand(MCOperand::createReg(AArch64::W0));
1682     } else {
1683       Add.setOpcode(AArch64::ADDXri);
1684       Add.addOperand(MCOperand::createReg(AArch64::X0));
1685       Add.addOperand(MCOperand::createReg(AArch64::X0));
1686     }
1687     Add.addOperand(SymTLSDescLo12);
1688     Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
1689     EmitToStreamer(*OutStreamer, Add);
1690 
1691     // Emit a relocation annotation. This expands to no code, but requests that
1692     // the following instruction get an R_AARCH64_TLSDESC_CALL relocation.
1693     MCInst TLSDescCall;
1694     TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
1695     TLSDescCall.addOperand(Sym);
1696     EmitToStreamer(*OutStreamer, TLSDescCall);
1697 
1698     MCInst Blr;
1699     Blr.setOpcode(AArch64::BLR);
1700     Blr.addOperand(MCOperand::createReg(AArch64::X1));
1701     EmitToStreamer(*OutStreamer, Blr);
1702 
1703     return;
1704   }
1705 
1706   case AArch64::JumpTableDest32:
1707   case AArch64::JumpTableDest16:
1708   case AArch64::JumpTableDest8:
1709     LowerJumpTableDest(*OutStreamer, *MI);
1710     return;
1711 
1712   case AArch64::FMOVH0:
1713   case AArch64::FMOVS0:
1714   case AArch64::FMOVD0:
1715     emitFMov0(*MI);
1716     return;
1717 
1718   case AArch64::MOPSMemoryCopyPseudo:
1719   case AArch64::MOPSMemoryMovePseudo:
1720   case AArch64::MOPSMemorySetPseudo:
1721   case AArch64::MOPSMemorySetTaggingPseudo:
1722     LowerMOPS(*OutStreamer, *MI);
1723     return;
1724 
1725   case TargetOpcode::STACKMAP:
1726     return LowerSTACKMAP(*OutStreamer, SM, *MI);
1727 
1728   case TargetOpcode::PATCHPOINT:
1729     return LowerPATCHPOINT(*OutStreamer, SM, *MI);
1730 
1731   case TargetOpcode::STATEPOINT:
1732     return LowerSTATEPOINT(*OutStreamer, SM, *MI);
1733 
1734   case TargetOpcode::FAULTING_OP:
1735     return LowerFAULTING_OP(*MI);
1736 
1737   case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
1738     LowerPATCHABLE_FUNCTION_ENTER(*MI);
1739     return;
1740 
1741   case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
1742     LowerPATCHABLE_FUNCTION_EXIT(*MI);
1743     return;
1744 
1745   case TargetOpcode::PATCHABLE_TAIL_CALL:
1746     LowerPATCHABLE_TAIL_CALL(*MI);
1747     return;
1748   case TargetOpcode::PATCHABLE_EVENT_CALL:
1749     return LowerPATCHABLE_EVENT_CALL(*MI, false);
1750   case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
1751     return LowerPATCHABLE_EVENT_CALL(*MI, true);
1752 
1753   case AArch64::KCFI_CHECK:
1754     LowerKCFI_CHECK(*MI);
1755     return;
1756 
1757   case AArch64::HWASAN_CHECK_MEMACCESS:
1758   case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
1759     LowerHWASAN_CHECK_MEMACCESS(*MI);
1760     return;
1761 
1762   case AArch64::SEH_StackAlloc:
1763     TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
1764     return;
1765 
1766   case AArch64::SEH_SaveFPLR:
1767     TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
1768     return;
1769 
1770   case AArch64::SEH_SaveFPLR_X:
1771     assert(MI->getOperand(0).getImm() < 0 &&
1772            "Pre increment SEH opcode must have a negative offset");
1773     TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
1774     return;
1775 
1776   case AArch64::SEH_SaveReg:
1777     TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
1778                                MI->getOperand(1).getImm());
1779     return;
1780 
1781   case AArch64::SEH_SaveReg_X:
1782     assert(MI->getOperand(1).getImm() < 0 &&
1783            "Pre increment SEH opcode must have a negative offset");
1784     TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
1785                                 -MI->getOperand(1).getImm());
1786     return;
1787 
1788   case AArch64::SEH_SaveRegP:
1789     if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
1790         MI->getOperand(0).getImm() <= 28) {
1791       assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
1792              "Register paired with LR must be odd");
1793       TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
1794                                     MI->getOperand(2).getImm());
1795       return;
1796     }
1797     assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1798             "Non-consecutive registers not allowed for save_regp");
1799     TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
1800                                 MI->getOperand(2).getImm());
1801     return;
1802 
1803   case AArch64::SEH_SaveRegP_X:
1804     assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1805             "Non-consecutive registers not allowed for save_regp_x");
1806     assert(MI->getOperand(2).getImm() < 0 &&
1807            "Pre increment SEH opcode must have a negative offset");
1808     TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
1809                                  -MI->getOperand(2).getImm());
1810     return;
1811 
1812   case AArch64::SEH_SaveFReg:
1813     TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
1814                                 MI->getOperand(1).getImm());
1815     return;
1816 
1817   case AArch64::SEH_SaveFReg_X:
1818     assert(MI->getOperand(1).getImm() < 0 &&
1819            "Pre increment SEH opcode must have a negative offset");
1820     TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
1821                                  -MI->getOperand(1).getImm());
1822     return;
1823 
1824   case AArch64::SEH_SaveFRegP:
1825     assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1826             "Non-consecutive registers not allowed for save_fregp");
1827     TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
1828                                  MI->getOperand(2).getImm());
1829     return;
1830 
1831   case AArch64::SEH_SaveFRegP_X:
1832     assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
1833             "Non-consecutive registers not allowed for save_fregp_x");
1834     assert(MI->getOperand(2).getImm() < 0 &&
1835            "Pre increment SEH opcode must have a negative offset");
1836     TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
1837                                   -MI->getOperand(2).getImm());
1838     return;
1839 
1840   case AArch64::SEH_SetFP:
1841     TS->emitARM64WinCFISetFP();
1842     return;
1843 
1844   case AArch64::SEH_AddFP:
1845     TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
1846     return;
1847 
1848   case AArch64::SEH_Nop:
1849     TS->emitARM64WinCFINop();
1850     return;
1851 
1852   case AArch64::SEH_PrologEnd:
1853     TS->emitARM64WinCFIPrologEnd();
1854     return;
1855 
1856   case AArch64::SEH_EpilogStart:
1857     TS->emitARM64WinCFIEpilogStart();
1858     return;
1859 
1860   case AArch64::SEH_EpilogEnd:
1861     TS->emitARM64WinCFIEpilogEnd();
1862     return;
1863 
1864   case AArch64::SEH_PACSignLR:
1865     TS->emitARM64WinCFIPACSignLR();
1866     return;
1867 
1868   case AArch64::SEH_SaveAnyRegQP:
1869     assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
1870            "Non-consecutive registers not allowed for save_any_reg");
1871     assert(MI->getOperand(2).getImm() >= 0 &&
1872            "SaveAnyRegQP SEH opcode offset must be non-negative");
1873     assert(MI->getOperand(2).getImm() <= 1008 &&
1874            "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
1875     TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
1876                                     MI->getOperand(2).getImm());
1877     return;
1878 
1879   case AArch64::SEH_SaveAnyRegQPX:
1880     assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
1881            "Non-consecutive registers not allowed for save_any_reg");
1882     assert(MI->getOperand(2).getImm() < 0 &&
1883            "SaveAnyRegQPX SEH opcode offset must be negative");
1884     assert(MI->getOperand(2).getImm() >= -1008 &&
1885            "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
1886     TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
1887                                      -MI->getOperand(2).getImm());
1888     return;
1889   }
1890 
1891   // Finally, do the automated lowerings for everything else.
1892   MCInst TmpInst;
1893   MCInstLowering.Lower(MI, TmpInst);
1894   EmitToStreamer(*OutStreamer, TmpInst);
1895 }
1896 
1897 void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
1898                                                MCSymbol *LazyPointer) {
1899   // _ifunc:
1900   //   adrp    x16, lazy_pointer@GOTPAGE
1901   //   ldr     x16, [x16, lazy_pointer@GOTPAGEOFF]
1902   //   ldr     x16, [x16]
1903   //   br      x16
1904 
1905   {
1906     MCInst Adrp;
1907     Adrp.setOpcode(AArch64::ADRP);
1908     Adrp.addOperand(MCOperand::createReg(AArch64::X16));
1909     MCOperand SymPage;
1910     MCInstLowering.lowerOperand(
1911         MachineOperand::CreateMCSymbol(LazyPointer,
1912                                        AArch64II::MO_GOT | AArch64II::MO_PAGE),
1913         SymPage);
1914     Adrp.addOperand(SymPage);
1915     OutStreamer->emitInstruction(Adrp, *STI);
1916   }
1917 
1918   {
1919     MCInst Ldr;
1920     Ldr.setOpcode(AArch64::LDRXui);
1921     Ldr.addOperand(MCOperand::createReg(AArch64::X16));
1922     Ldr.addOperand(MCOperand::createReg(AArch64::X16));
1923     MCOperand SymPageOff;
1924     MCInstLowering.lowerOperand(
1925         MachineOperand::CreateMCSymbol(LazyPointer, AArch64II::MO_GOT |
1926                                                         AArch64II::MO_PAGEOFF),
1927         SymPageOff);
1928     Ldr.addOperand(SymPageOff);
1929     Ldr.addOperand(MCOperand::createImm(0));
1930     OutStreamer->emitInstruction(Ldr, *STI);
1931   }
1932 
1933   OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRXui)
1934                                    .addReg(AArch64::X16)
1935                                    .addReg(AArch64::X16)
1936                                    .addImm(0),
1937                                *STI);
1938 
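         // On arm64e the loaded target is expected to be a signed pointer, so branch
         // with braaz (authenticate with the A key and a zero discriminator) rather
         // than a plain br.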
1939   OutStreamer->emitInstruction(MCInstBuilder(TM.getTargetTriple().isArm64e()
1940                                                  ? AArch64::BRAAZ
1941                                                  : AArch64::BR)
1942                                    .addReg(AArch64::X16),
1943                                *STI);
1944 }
1945 
1946 void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
1947                                                      const GlobalIFunc &GI,
1948                                                      MCSymbol *LazyPointer) {
1949   // These stub helpers are only ever called once, so here we're optimizing for
1950   // minimum size by using the pre-indexed store variants, which saves a few
1951   // bytes of instructions otherwise needed to bump & restore sp.
1952 
1953   // _ifunc.stub_helper:
1954   //   stp	fp, lr, [sp, #-16]!
1955   //   mov	fp, sp
1956   //   stp	x1, x0, [sp, #-16]!
1957   //   stp	x3, x2, [sp, #-16]!
1958   //   stp	x5, x4, [sp, #-16]!
1959   //   stp	x7, x6, [sp, #-16]!
1960   //   stp	d1, d0, [sp, #-16]!
1961   //   stp	d3, d2, [sp, #-16]!
1962   //   stp	d5, d4, [sp, #-16]!
1963   //   stp	d7, d6, [sp, #-16]!
1964   //   bl	_resolver
1965   //   adrp	x16, lazy_pointer@GOTPAGE
1966   //   ldr	x16, [x16, lazy_pointer@GOTPAGEOFF]
1967   //   str	x0, [x16]
1968   //   mov	x16, x0
1969   //   ldp	d7, d6, [sp], #16
1970   //   ldp	d5, d4, [sp], #16
1971   //   ldp	d3, d2, [sp], #16
1972   //   ldp	d1, d0, [sp], #16
1973   //   ldp	x7, x6, [sp], #16
1974   //   ldp	x5, x4, [sp], #16
1975   //   ldp	x3, x2, [sp], #16
1976   //   ldp	x1, x0, [sp], #16
1977   //   ldp	fp, lr, [sp], #16
1978   //   br	x16
1979 
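         // Note: the pre/post-index immediates on the STP/LDP instructions below are
         // scaled by the register size (8 bytes), so addImm(-2)/addImm(2) correspond
         // to the #-16 / #16 adjustments in the sequence above.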
1980   OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
1981                                    .addReg(AArch64::SP)
1982                                    .addReg(AArch64::FP)
1983                                    .addReg(AArch64::LR)
1984                                    .addReg(AArch64::SP)
1985                                    .addImm(-2),
1986                                *STI);
1987 
1988   OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
1989                                    .addReg(AArch64::FP)
1990                                    .addReg(AArch64::SP)
1991                                    .addImm(0)
1992                                    .addImm(0),
1993                                *STI);
1994 
1995   for (int I = 0; I != 4; ++I)
1996     OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPXpre)
1997                                      .addReg(AArch64::SP)
1998                                      .addReg(AArch64::X1 + 2 * I)
1999                                      .addReg(AArch64::X0 + 2 * I)
2000                                      .addReg(AArch64::SP)
2001                                      .addImm(-2),
2002                                  *STI);
2003 
2004   for (int I = 0; I != 4; ++I)
2005     OutStreamer->emitInstruction(MCInstBuilder(AArch64::STPDpre)
2006                                      .addReg(AArch64::SP)
2007                                      .addReg(AArch64::D1 + 2 * I)
2008                                      .addReg(AArch64::D0 + 2 * I)
2009                                      .addReg(AArch64::SP)
2010                                      .addImm(-2),
2011                                  *STI);
2012 
2013   OutStreamer->emitInstruction(
2014       MCInstBuilder(AArch64::BL)
2015           .addOperand(MCOperand::createExpr(lowerConstant(GI.getResolver()))),
2016       *STI);
2017 
2018   {
2019     MCInst Adrp;
2020     Adrp.setOpcode(AArch64::ADRP);
2021     Adrp.addOperand(MCOperand::createReg(AArch64::X16));
2022     MCOperand SymPage;
2023     MCInstLowering.lowerOperand(
2024         MachineOperand::CreateES(LazyPointer->getName().data() + 1,
2025                                  AArch64II::MO_GOT | AArch64II::MO_PAGE),
2026         SymPage);
2027     Adrp.addOperand(SymPage);
2028     OutStreamer->emitInstruction(Adrp, *STI);
2029   }
2030 
2031   {
2032     MCInst Ldr;
2033     Ldr.setOpcode(AArch64::LDRXui);
2034     Ldr.addOperand(MCOperand::createReg(AArch64::X16));
2035     Ldr.addOperand(MCOperand::createReg(AArch64::X16));
2036     MCOperand SymPageOff;
2037     MCInstLowering.lowerOperand(
2038         MachineOperand::CreateES(LazyPointer->getName().data() + 1,
2039                                  AArch64II::MO_GOT | AArch64II::MO_PAGEOFF),
2040         SymPageOff);
2041     Ldr.addOperand(SymPageOff);
2042     Ldr.addOperand(MCOperand::createImm(0));
2043     OutStreamer->emitInstruction(Ldr, *STI);
2044   }
2045 
2046   OutStreamer->emitInstruction(MCInstBuilder(AArch64::STRXui)
2047                                    .addReg(AArch64::X0)
2048                                    .addReg(AArch64::X16)
2049                                    .addImm(0),
2050                                *STI);
2051 
2052   OutStreamer->emitInstruction(MCInstBuilder(AArch64::ADDXri)
2053                                    .addReg(AArch64::X16)
2054                                    .addReg(AArch64::X0)
2055                                    .addImm(0)
2056                                    .addImm(0),
2057                                *STI);
2058 
2059   for (int I = 3; I != -1; --I)
2060     OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDPDpost)
2061                                      .addReg(AArch64::SP)
2062                                      .addReg(AArch64::D1 + 2 * I)
2063                                      .addReg(AArch64::D0 + 2 * I)
2064                                      .addReg(AArch64::SP)
2065                                      .addImm(2),
2066                                  *STI);
2067 
2068   for (int I = 3; I != -1; --I)
2069     OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDPXpost)
2070                                      .addReg(AArch64::SP)
2071                                      .addReg(AArch64::X1 + 2 * I)
2072                                      .addReg(AArch64::X0 + 2 * I)
2073                                      .addReg(AArch64::SP)
2074                                      .addImm(2),
2075                                  *STI);
2076 
2077   OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDPXpost)
2078                                    .addReg(AArch64::SP)
2079                                    .addReg(AArch64::FP)
2080                                    .addReg(AArch64::LR)
2081                                    .addReg(AArch64::SP)
2082                                    .addImm(2),
2083                                *STI);
2084 
2085   OutStreamer->emitInstruction(MCInstBuilder(TM.getTargetTriple().isArm64e()
2086                                                  ? AArch64::BRAAZ
2087                                                  : AArch64::BR)
2088                                    .addReg(AArch64::X16),
2089                                *STI);
2090 }
2091 
2092 const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV) {
2093   if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
2094     return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
2095                                    OutContext);
2096   }
2097 
2098   return AsmPrinter::lowerConstant(CV);
2099 }
2100 
2101 // Force static initialization.
2102 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmPrinter() {
2103   RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
2104   RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
2105   RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheARM64Target());
2106   RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64_32Target());
2107   RegisterAsmPrinter<AArch64AsmPrinter> V(getTheAArch64_32Target());
2108 }
2109