xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp (revision 0d8fe2373503aeac48492f28073049a8bfa4feb5)
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/AArch64AddressingModes.h"
10 #include "MCTargetDesc/AArch64InstPrinter.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "TargetInfo/AArch64TargetInfo.h"
15 #include "AArch64InstrInfo.h"
16 #include "Utils/AArch64BaseInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/StringExtras.h"
23 #include "llvm/ADT/StringMap.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/StringSwitch.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/MC/MCContext.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/MC/MCInst.h"
30 #include "llvm/MC/MCLinkerOptimizationHint.h"
31 #include "llvm/MC/MCObjectFileInfo.h"
32 #include "llvm/MC/MCParser/MCAsmLexer.h"
33 #include "llvm/MC/MCParser/MCAsmParser.h"
34 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
35 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
36 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
37 #include "llvm/MC/MCRegisterInfo.h"
38 #include "llvm/MC/MCStreamer.h"
39 #include "llvm/MC/MCSubtargetInfo.h"
40 #include "llvm/MC/MCSymbol.h"
41 #include "llvm/MC/MCTargetOptions.h"
42 #include "llvm/MC/SubtargetFeature.h"
43 #include "llvm/MC/MCValue.h"
44 #include "llvm/Support/Casting.h"
45 #include "llvm/Support/Compiler.h"
46 #include "llvm/Support/ErrorHandling.h"
47 #include "llvm/Support/MathExtras.h"
48 #include "llvm/Support/SMLoc.h"
49 #include "llvm/Support/TargetParser.h"
50 #include "llvm/Support/TargetRegistry.h"
51 #include "llvm/Support/raw_ostream.h"
52 #include <cassert>
53 #include <cctype>
54 #include <cstdint>
55 #include <cstdio>
56 #include <string>
57 #include <tuple>
58 #include <utility>
59 #include <vector>
60 
61 using namespace llvm;
62 
63 namespace {
64 
// Broad categories of register the assembly parser distinguishes while
// matching operands.
65 enum class RegKind {
66   Scalar,
67   NeonVector,
68   SVEDataVector,
69   SVEPredicateVector
70 };
71 
// How a parsed register must relate to the register class the instruction
// expects: the register itself, its super-register, or its sub-register
// (used for classes such as GPR64as32 / GPR32as64 — see RegOp below).
72 enum RegConstraintEqualityTy {
73   EqualsReg,
74   EqualsSuperReg,
75   EqualsSubReg
76 };
77 
// Target assembly parser for AArch64: parses instructions, operands and
// target-specific directives, and drives the auto-generated matcher.
78 class AArch64AsmParser : public MCTargetAsmParser {
79 private:
80   StringRef Mnemonic; ///< Instruction mnemonic.
81 
82   // Map of register aliases created via the .req directive.
83   StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
84 
  // Captures the state of a parsed MOVPRFX instruction: whether one is
  // active, whether it is predicated, its destructive element size, and
  // its destination / governing-predicate registers.  NextPrefix (below)
  // holds this for the most recently seen instruction so the instruction
  // that follows a movprfx can be checked against it.
85   class PrefixInfo {
86   public:
    // Recognise the MOVPRFX opcodes and record their operands; any other
    // opcode produces an inactive (default) PrefixInfo.
87     static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
88       PrefixInfo Prefix;
89       switch (Inst.getOpcode()) {
90       case AArch64::MOVPRFX_ZZ:
        // Unpredicated form: only the destination register is recorded.
91         Prefix.Active = true;
92         Prefix.Dst = Inst.getOperand(0).getReg();
93         break;
94       case AArch64::MOVPRFX_ZPmZ_B:
95       case AArch64::MOVPRFX_ZPmZ_H:
96       case AArch64::MOVPRFX_ZPmZ_S:
97       case AArch64::MOVPRFX_ZPmZ_D:
        // Merging-predicated form: the governing predicate is operand 2.
98         Prefix.Active = true;
99         Prefix.Predicated = true;
100         Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
101         assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
102                "No destructive element size set for movprfx");
103         Prefix.Dst = Inst.getOperand(0).getReg();
104         Prefix.Pg = Inst.getOperand(2).getReg();
105         break;
106       case AArch64::MOVPRFX_ZPzZ_B:
107       case AArch64::MOVPRFX_ZPzZ_H:
108       case AArch64::MOVPRFX_ZPzZ_S:
109       case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing-predicated form: the governing predicate is operand 1.
110         Prefix.Active = true;
111         Prefix.Predicated = true;
112         Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
113         assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
114                "No destructive element size set for movprfx");
115         Prefix.Dst = Inst.getOperand(0).getReg();
116         Prefix.Pg = Inst.getOperand(1).getReg();
117         break;
118       default:
119         break;
120       }
121 
122       return Prefix;
123     }
124 
125     PrefixInfo() : Active(false), Predicated(false) {}
126     bool isActive() const { return Active; }
127     bool isPredicated() const { return Predicated; }
    // Only meaningful for the predicated forms, hence the asserts below.
128     unsigned getElementSize() const {
129       assert(Predicated);
130       return ElementSize;
131     }
132     unsigned getDstReg() const { return Dst; }
133     unsigned getPgReg() const {
134       assert(Predicated);
135       return Pg;
136     }
137 
138   private:
139     bool Active;
140     bool Predicated;
141     unsigned ElementSize;
142     unsigned Dst;
143     unsigned Pg;
144   } NextPrefix;
145 
146   AArch64TargetStreamer &getTargetStreamer() {
147     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
148     return static_cast<AArch64TargetStreamer &>(TS);
149   }
150 
  // Location of the token currently being parsed.
151   SMLoc getLoc() const { return getParser().getTok().getLoc(); }
152 
  // Instruction / operand parsing helpers.  Each returns true on error,
  // following the MCAsmParser convention.
153   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
154   void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
155   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
156   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
157   unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
158   bool parseRegister(OperandVector &Operands);
159   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
160   bool parseNeonVectorList(OperandVector &Operands);
161   bool parseOptionalMulOperand(OperandVector &Operands);
162   bool parseKeywordOperand(OperandVector &Operands);
163   bool parseOperand(OperandVector &Operands, bool isCondCode,
164                     bool invertCondCode);
165   bool parseImmExpr(int64_t &Out);
166   bool parseComma();
167   bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
168                             unsigned Last);
169 
170   bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
171                       OperandVector &Operands);
172 
  // Target-specific directive handlers.
173   bool parseDirectiveArch(SMLoc L);
174   bool parseDirectiveArchExtension(SMLoc L);
175   bool parseDirectiveCPU(SMLoc L);
176   bool parseDirectiveInst(SMLoc L);
177 
178   bool parseDirectiveTLSDescCall(SMLoc L);
179 
180   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
181   bool parseDirectiveLtorg(SMLoc L);
182 
183   bool parseDirectiveReq(StringRef Name, SMLoc L);
184   bool parseDirectiveUnreq(SMLoc L);
185   bool parseDirectiveCFINegateRAState();
186   bool parseDirectiveCFIBKeyFrame();
187 
188   bool parseDirectiveVariantPCS(SMLoc L);
189 
  // Windows SEH unwind directive handlers (.seh_*).
190   bool parseDirectiveSEHAllocStack(SMLoc L);
191   bool parseDirectiveSEHPrologEnd(SMLoc L);
192   bool parseDirectiveSEHSaveR19R20X(SMLoc L);
193   bool parseDirectiveSEHSaveFPLR(SMLoc L);
194   bool parseDirectiveSEHSaveFPLRX(SMLoc L);
195   bool parseDirectiveSEHSaveReg(SMLoc L);
196   bool parseDirectiveSEHSaveRegX(SMLoc L);
197   bool parseDirectiveSEHSaveRegP(SMLoc L);
198   bool parseDirectiveSEHSaveRegPX(SMLoc L);
199   bool parseDirectiveSEHSaveLRPair(SMLoc L);
200   bool parseDirectiveSEHSaveFReg(SMLoc L);
201   bool parseDirectiveSEHSaveFRegX(SMLoc L);
202   bool parseDirectiveSEHSaveFRegP(SMLoc L);
203   bool parseDirectiveSEHSaveFRegPX(SMLoc L);
204   bool parseDirectiveSEHSetFP(SMLoc L);
205   bool parseDirectiveSEHAddFP(SMLoc L);
206   bool parseDirectiveSEHNop(SMLoc L);
207   bool parseDirectiveSEHSaveNext(SMLoc L);
208   bool parseDirectiveSEHEpilogStart(SMLoc L);
209   bool parseDirectiveSEHEpilogEnd(SMLoc L);
210   bool parseDirectiveSEHTrapFrame(SMLoc L);
211   bool parseDirectiveSEHMachineFrame(SMLoc L);
212   bool parseDirectiveSEHContext(SMLoc L);
213   bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
214 
215   bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
216                            SmallVectorImpl<SMLoc> &Loc);
217   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
218                                OperandVector &Operands, MCStreamer &Out,
219                                uint64_t &ErrorInfo,
220                                bool MatchingInlineAsm) override;
221 /// @name Auto-generated Match Functions
222 /// {
223 
224 #define GET_ASSEMBLER_HEADER
225 #include "AArch64GenAsmMatcher.inc"
226 
227   /// }
228 
  // Custom operand parsers invoked by the generated matcher.  These return
  // OperandMatchResultTy rather than the bool error convention above.
229   OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
230   OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
231                                               RegKind MatchKind);
232   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
233   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
234   OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
235   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
236   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
237   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
238   template <bool IsSVEPrefetch = false>
239   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
240   OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
241   OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
242   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
243   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
244   template<bool AddFPZeroAsLiteral>
245   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
246   OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
247   OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
248   bool tryParseNeonVectorRegister(OperandVector &Operands);
249   OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
250   OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
251   template <bool ParseShiftExtend,
252             RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
253   OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
254   template <bool ParseShiftExtend, bool ParseSuffix>
255   OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
256   OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
257   template <RegKind VectorKind>
258   OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
259                                           bool ExpectMatch = false);
260   OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
261   OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
262 
263 public:
  // Extra match-result codes, appended after the generated diagnostics.
264   enum AArch64MatchResultTy {
265     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
266 #define GET_OPERAND_DIAGNOSTIC_TYPES
267 #include "AArch64GenAsmMatcher.inc"
268   };
  // True when targeting the ILP32 (32-bit pointer) ABI variant.
269   bool IsILP32;
270 
271   AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
272                    const MCInstrInfo &MII, const MCTargetOptions &Options)
273     : MCTargetAsmParser(Options, STI, MII) {
    // ILP32 is selected via the triple's environment component.
274     IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
275     MCAsmParserExtension::Initialize(Parser);
276     MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists; the streamer takes ownership.
277     if (S.getTargetStreamer() == nullptr)
278       new AArch64TargetStreamer(S);
279 
280     // Alias .hword/.word/.[dx]word to the target-independent
281     // .2byte/.4byte/.8byte directives as they have the same form and
282     // semantics:
283     ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
284     Parser.addAliasForDirective(".hword", ".2byte");
285     Parser.addAliasForDirective(".word", ".4byte");
286     Parser.addAliasForDirective(".dword", ".8byte");
287     Parser.addAliasForDirective(".xword", ".8byte");
288 
289     // Initialize the set of available features.
290     setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
291   }
292 
  // MCTargetAsmParser interface overrides.
293   bool regsEqual(const MCParsedAsmOperand &Op1,
294                  const MCParsedAsmOperand &Op2) const override;
295   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
296                         SMLoc NameLoc, OperandVector &Operands) override;
297   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
298   OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
299                                         SMLoc &EndLoc) override;
300   bool ParseDirective(AsmToken DirectiveID) override;
301   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
302                                       unsigned Kind) override;
303 
  // Classifies a symbolic expression into its ELF/Darwin relocation
  // variant kind plus a constant addend.  Returns false if the expression
  // is not of a recognised shape.
304   static bool classifySymbolRef(const MCExpr *Expr,
305                                 AArch64MCExpr::VariantKind &ELFRefKind,
306                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
307                                 int64_t &Addend);
308 };
309 
310 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
311 /// instruction.
312 class AArch64Operand : public MCParsedAsmOperand {
313 private:
  // Discriminator for the anonymous operand-payload union; exactly one
  // union member is valid, selected by this tag.
314   enum KindTy {
315     k_Immediate,
316     k_ShiftedImm,
317     k_CondCode,
318     k_Register,
319     k_VectorList,
320     k_VectorIndex,
321     k_Token,
322     k_SysReg,
323     k_SysCR,
324     k_Prefetch,
325     k_ShiftExtend,
326     k_FPImm,
327     k_Barrier,
328     k_PSBHint,
329     k_BTIHint,
330   } Kind;
331 
  // Source range of this operand, reported by getStartLoc()/getEndLoc().
332   SMLoc StartLoc, EndLoc;
333 
  // A literal token operand, stored as a (pointer, length) span of the
  // source text rather than an owned string.
334   struct TokOp {
335     const char *Data;
336     unsigned Length;
337     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
338   };
339 
340   // Separate shift/extend operand.
341   struct ShiftExtendOp {
342     AArch64_AM::ShiftExtendType Type;
343     unsigned Amount;
344     bool HasExplicitAmount; // False when the amount is implicit (no '#n').
345   };
346 
  // A register operand, optionally carrying a fused shift/extend.
347   struct RegOp {
348     unsigned RegNum;
349     RegKind Kind;
350     int ElementWidth;
351 
352     // The register may be allowed as a different register class,
353     // e.g. for GPR64as32 or GPR32as64.
354     RegConstraintEqualityTy EqualityTy;
355 
356     // In some cases the shift/extend needs to be explicitly parsed together
357     // with the register, rather than as a separate operand. This is needed
358     // for addressing modes where the instruction as a whole dictates the
359     // scaling/extend, rather than specific bits in the instruction.
360     // By parsing them as a single operand, we avoid the need to pass an
361     // extra operand in all CodeGen patterns (because all operands need to
362     // have an associated value), and we avoid the need to update TableGen to
363     // accept operands that have no associated bits in the instruction.
364     //
365     // An added benefit of parsing them together is that the assembler
366     // can give a sensible diagnostic if the scaling is not correct.
367     //
368     // The default is 'lsl #0' (HasExplicitAmount = false) if no
369     // ShiftExtend is specified.
370     ShiftExtendOp ShiftExtend;
371   };
372 
  // A vector register list (e.g. NEON/SVE multi-register operands):
  // Count consecutive registers starting at RegNum.
373   struct VectorListOp {
374     unsigned RegNum;
375     unsigned Count;
376     unsigned NumElements;
377     unsigned ElementWidth;
378     RegKind  RegisterKind;
379   };
380 
  // A lane index, e.g. the '[1]' in 'v0.s[1]'.
381   struct VectorIndexOp {
382     unsigned Val;
383   };
384 
  // A plain immediate expression (may be symbolic, not just a constant).
385   struct ImmOp {
386     const MCExpr *Val;
387   };
388 
  // An immediate with an explicit left-shift amount, e.g. '#1, lsl #12'.
389   struct ShiftedImmOp {
390     const MCExpr *Val;
391     unsigned ShiftAmount;
392   };
393 
394   struct CondCodeOp {
395     AArch64CC::CondCode Code;
396   };
397 
398   struct FPImmOp {
399     uint64_t Val; // APFloat value bitcasted to uint64_t.
400     bool IsExact; // describes whether parsed value was exact.
401   };
402 
  // A memory-barrier option.  Data/Length give the spelled name (if any).
403   struct BarrierOp {
404     const char *Data;
405     unsigned Length;
406     unsigned Val; // Not the enum since not all values have names.
407     bool HasnXSModifier;
408   };
409 
  // A system register, with its separate MRS/MSR encodings and the pstate
  // field used for MSR-immediate forms.
410   struct SysRegOp {
411     const char *Data;
412     unsigned Length;
413     uint32_t MRSReg;
414     uint32_t MSRReg;
415     uint32_t PStateField;
416   };
417 
  // A system-instruction control register immediate (Cn/Cm).
418   struct SysCRImmOp {
419     unsigned Val;
420   };
421 
422   struct PrefetchOp {
423     const char *Data;
424     unsigned Length;
425     unsigned Val;
426   };
427 
428   struct PSBHintOp {
429     const char *Data;
430     unsigned Length;
431     unsigned Val;
432   };
433 
434   struct BTIHintOp {
435     const char *Data;
436     unsigned Length;
437     unsigned Val;
438   };
439 
440   struct ExtendOp {
441     unsigned Val;
442   };
443 
  // Payload storage; the member that is live is selected by Kind above.
444   union {
445     struct TokOp Tok;
446     struct RegOp Reg;
447     struct VectorListOp VectorList;
448     struct VectorIndexOp VectorIndex;
449     struct ImmOp Imm;
450     struct ShiftedImmOp ShiftedImm;
451     struct CondCodeOp CondCode;
452     struct FPImmOp FPImm;
453     struct BarrierOp Barrier;
454     struct SysRegOp SysReg;
455     struct SysCRImmOp SysCRImm;
456     struct PrefetchOp Prefetch;
457     struct PSBHintOp PSBHint;
458     struct BTIHintOp BTIHint;
459     struct ShiftExtendOp ShiftExtend;
460   };
461 
462   // Keep the MCContext around as the MCExprs may need manipulated during
463   // the add<>Operands() calls.
464   MCContext &Ctx;
465 
466 public:
467   AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
468 
  // Copy constructor: because the payload is a union, only the member
  // selected by the source's Kind is copied.
469   AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
470     Kind = o.Kind;
471     StartLoc = o.StartLoc;
472     EndLoc = o.EndLoc;
473     switch (Kind) {
474     case k_Token:
475       Tok = o.Tok;
476       break;
477     case k_Immediate:
478       Imm = o.Imm;
479       break;
480     case k_ShiftedImm:
481       ShiftedImm = o.ShiftedImm;
482       break;
483     case k_CondCode:
484       CondCode = o.CondCode;
485       break;
486     case k_FPImm:
487       FPImm = o.FPImm;
488       break;
489     case k_Barrier:
490       Barrier = o.Barrier;
491       break;
492     case k_Register:
493       Reg = o.Reg;
494       break;
495     case k_VectorList:
496       VectorList = o.VectorList;
497       break;
498     case k_VectorIndex:
499       VectorIndex = o.VectorIndex;
500       break;
501     case k_SysReg:
502       SysReg = o.SysReg;
503       break;
504     case k_SysCR:
505       SysCRImm = o.SysCRImm;
506       break;
507     case k_Prefetch:
508       Prefetch = o.Prefetch;
509       break;
510     case k_PSBHint:
511       PSBHint = o.PSBHint;
512       break;
513     case k_BTIHint:
514       BTIHint = o.BTIHint;
515       break;
516     case k_ShiftExtend:
517       ShiftExtend = o.ShiftExtend;
518       break;
519     }
520   }
521 
522   /// getStartLoc - Get the location of the first token of this operand.
523   SMLoc getStartLoc() const override { return StartLoc; }
524   /// getEndLoc - Get the location of the last token of this operand.
525   SMLoc getEndLoc() const override { return EndLoc; }
526 
  // Typed accessors for the union payload.  Each asserts that the operand
  // is of the expected kind before reading the corresponding member.
527   StringRef getToken() const {
528     assert(Kind == k_Token && "Invalid access!");
529     return StringRef(Tok.Data, Tok.Length);
530   }
531 
532   bool isTokenSuffix() const {
533     assert(Kind == k_Token && "Invalid access!");
534     return Tok.IsSuffix;
535   }
536 
537   const MCExpr *getImm() const {
538     assert(Kind == k_Immediate && "Invalid access!");
539     return Imm.Val;
540   }
541 
542   const MCExpr *getShiftedImmVal() const {
543     assert(Kind == k_ShiftedImm && "Invalid access!");
544     return ShiftedImm.Val;
545   }
546 
547   unsigned getShiftedImmShift() const {
548     assert(Kind == k_ShiftedImm && "Invalid access!");
549     return ShiftedImm.ShiftAmount;
550   }
551 
552   AArch64CC::CondCode getCondCode() const {
553     assert(Kind == k_CondCode && "Invalid access!");
554     return CondCode.Code;
555   }
556 
  // Reconstructs the APFloat from the raw bit pattern stored in FPImm.Val.
557   APFloat getFPImm() const {
558     assert (Kind == k_FPImm && "Invalid access!");
559     return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
560   }
561 
562   bool getFPImmIsExact() const {
563     assert (Kind == k_FPImm && "Invalid access!");
564     return FPImm.IsExact;
565   }
566 
567   unsigned getBarrier() const {
568     assert(Kind == k_Barrier && "Invalid access!");
569     return Barrier.Val;
570   }
571 
572   StringRef getBarrierName() const {
573     assert(Kind == k_Barrier && "Invalid access!");
574     return StringRef(Barrier.Data, Barrier.Length);
575   }
576 
577   bool getBarriernXSModifier() const {
578     assert(Kind == k_Barrier && "Invalid access!");
579     return Barrier.HasnXSModifier;
580   }
581 
582   unsigned getReg() const override {
583     assert(Kind == k_Register && "Invalid access!");
584     return Reg.RegNum;
585   }
586 
587   RegConstraintEqualityTy getRegEqualityTy() const {
588     assert(Kind == k_Register && "Invalid access!");
589     return Reg.EqualityTy;
590   }
591 
592   unsigned getVectorListStart() const {
593     assert(Kind == k_VectorList && "Invalid access!");
594     return VectorList.RegNum;
595   }
596 
597   unsigned getVectorListCount() const {
598     assert(Kind == k_VectorList && "Invalid access!");
599     return VectorList.Count;
600   }
601 
602   unsigned getVectorIndex() const {
603     assert(Kind == k_VectorIndex && "Invalid access!");
604     return VectorIndex.Val;
605   }
606 
607   StringRef getSysReg() const {
608     assert(Kind == k_SysReg && "Invalid access!");
609     return StringRef(SysReg.Data, SysReg.Length);
610   }
611 
612   unsigned getSysCR() const {
613     assert(Kind == k_SysCR && "Invalid access!");
614     return SysCRImm.Val;
615   }
616 
617   unsigned getPrefetch() const {
618     assert(Kind == k_Prefetch && "Invalid access!");
619     return Prefetch.Val;
620   }
621 
622   unsigned getPSBHint() const {
623     assert(Kind == k_PSBHint && "Invalid access!");
624     return PSBHint.Val;
625   }
626 
627   StringRef getPSBHintName() const {
628     assert(Kind == k_PSBHint && "Invalid access!");
629     return StringRef(PSBHint.Data, PSBHint.Length);
630   }
631 
632   unsigned getBTIHint() const {
633     assert(Kind == k_BTIHint && "Invalid access!");
634     return BTIHint.Val;
635   }
636 
637   StringRef getBTIHintName() const {
638     assert(Kind == k_BTIHint && "Invalid access!");
639     return StringRef(BTIHint.Data, BTIHint.Length);
640   }
641 
642   StringRef getPrefetchName() const {
643     assert(Kind == k_Prefetch && "Invalid access!");
644     return StringRef(Prefetch.Data, Prefetch.Length);
645   }
646 
  // Shift/extend accessors work for both a standalone k_ShiftExtend operand
  // and a k_Register operand carrying a fused shift/extend (see RegOp).
647   AArch64_AM::ShiftExtendType getShiftExtendType() const {
648     if (Kind == k_ShiftExtend)
649       return ShiftExtend.Type;
650     if (Kind == k_Register)
651       return Reg.ShiftExtend.Type;
652     llvm_unreachable("Invalid access!");
653   }
654 
655   unsigned getShiftExtendAmount() const {
656     if (Kind == k_ShiftExtend)
657       return ShiftExtend.Amount;
658     if (Kind == k_Register)
659       return Reg.ShiftExtend.Amount;
660     llvm_unreachable("Invalid access!");
661   }
662 
663   bool hasShiftExtendAmount() const {
664     if (Kind == k_ShiftExtend)
665       return ShiftExtend.HasExplicitAmount;
666     if (Kind == k_Register)
667       return Reg.ShiftExtend.HasExplicitAmount;
668     llvm_unreachable("Invalid access!");
669   }
670 
671   bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 has no memory-operand kind in this parser; addresses are built
  // from register + immediate operands instead.
672   bool isMem() const override { return false; }
673 
  // True for a constant immediate in [0, 64).
674   bool isUImm6() const {
675     if (!isImm())
676       return false;
677     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
678     if (!MCE)
679       return false;
680     int64_t Val = MCE->getValue();
681     return (Val >= 0 && Val < 64);
682   }
683 
  // Signed Width-bit immediate (unscaled).
684   template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
685 
686   template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
687     return isImmScaled<Bits, Scale>(true);
688   }
689 
690   template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
691     return isImmScaled<Bits, Scale>(false);
692   }
693 
  // Shared check for (un)signed Bits-bit immediates that must be a multiple
  // of Scale.  Returns NoMatch for non-constant operands, Match when in
  // range and correctly scaled, NearMatch otherwise (for diagnostics).
694   template <int Bits, int Scale>
695   DiagnosticPredicate isImmScaled(bool Signed) const {
696     if (!isImm())
697       return DiagnosticPredicateTy::NoMatch;
698 
699     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
700     if (!MCE)
701       return DiagnosticPredicateTy::NoMatch;
702 
703     int64_t MinVal, MaxVal;
704     if (Signed) {
705       int64_t Shift = Bits - 1;
706       MinVal = (int64_t(1) << Shift) * -Scale;
707       MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
708     } else {
709       MinVal = 0;
710       MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
711     }
712 
713     int64_t Val = MCE->getValue();
714     if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
715       return DiagnosticPredicateTy::Match;
716 
717     return DiagnosticPredicateTy::NearMatch;
718   }
719 
  // SVE predicate-pattern immediate: a constant in [0, 32).
720   DiagnosticPredicate isSVEPattern() const {
721     if (!isImm())
722       return DiagnosticPredicateTy::NoMatch;
723     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
724     if (!MCE)
725       return DiagnosticPredicateTy::NoMatch;
726     int64_t Val = MCE->getValue();
727     if (Val >= 0 && Val < 32)
728       return DiagnosticPredicateTy::Match;
729     return DiagnosticPredicateTy::NearMatch;
730   }
731 
  // True when a symbolic expression can serve as a uimm12 load/store
  // offset, i.e. it resolves to one of the :lo12:-style relocation kinds.
732   bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
733     AArch64MCExpr::VariantKind ELFRefKind;
734     MCSymbolRefExpr::VariantKind DarwinRefKind;
735     int64_t Addend;
736     if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
737                                            Addend)) {
738       // If we don't understand the expression, assume the best and
739       // let the fixup and relocation code deal with it.
740       return true;
741     }
742 
743     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
744         ELFRefKind == AArch64MCExpr::VK_LO12 ||
745         ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
746         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
747         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
748         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
749         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
750         ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
751         ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
752         ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
753         ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
754         ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
755       // Note that we don't range-check the addend. It's adjusted modulo page
756       // size when converted, so there is no "out of range" condition when using
757       // @pageoff.
758       return true;
759     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
760                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
761       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
762       return Addend == 0;
763     }
764 
765     return false;
766   }
767 
  // uimm12 offset scaled by the access size: a non-negative constant that
  // is a multiple of Scale with (Val / Scale) < 0x1000, or a suitable
  // symbolic :lo12:-style expression.
768   template <int Scale> bool isUImm12Offset() const {
769     if (!isImm())
770       return false;
771 
772     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
773     if (!MCE)
774       return isSymbolicUImm12Offset(getImm());
775 
776     int64_t Val = MCE->getValue();
777     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
778   }
779 
  // Constant immediate in the inclusive range [N, M].
780   template <int N, int M>
781   bool isImmInRange() const {
782     if (!isImm())
783       return false;
784     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
785     if (!MCE)
786       return false;
787     int64_t Val = MCE->getValue();
788     return (Val >= N && Val <= M);
789   }
790 
791   // NOTE: Also used for isLogicalImmNot as anything that can be represented as
792   // a logical immediate can always be represented when inverted.
  // Checks that the constant fits in sizeof(T)*8 bits (allowing either
  // all-zero or all-one upper bits, so a bitwise-NOT also qualifies) and
  // is encodable as an AArch64 logical immediate of that width.
793   template <typename T>
794   bool isLogicalImm() const {
795     if (!isImm())
796       return false;
797     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
798     if (!MCE)
799       return false;
800 
801     int64_t Val = MCE->getValue();
802     // Avoid left shift by 64 directly.
803     uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
804     // Allow all-0 or all-1 in top bits to permit bitwise NOT.
805     if ((Val & Upper) && (Val & Upper) != Upper)
806       return false;
807 
808     return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
809   }
810 
811   bool isShiftedImm() const { return Kind == k_ShiftedImm; }
812 
813   /// Returns the immediate value as a pair of (imm, shift) if the immediate is
814   /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
815   /// immediate that can be shifted by 'Shift'.
816   template <unsigned Width>
817   Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already written with the requested shift, e.g. '#1, lsl #12'.
818     if (isShiftedImm() && Width == getShiftedImmShift())
819       if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
820         return std::make_pair(CE->getValue(), Width);
821 
    // Plain immediate: fold out the shift if the low Width bits are zero
    // (zero itself is reported unshifted).
822     if (isImm())
823       if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
824         int64_t Val = CE->getValue();
825         if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
826           return std::make_pair(Val >> Width, Width);
827         else
828           return std::make_pair(Val, 0u);
829       }
830 
831     return {};
832   }
833 
  // True for immediates valid in ADD/SUB: either a constant expressible as
  // uimm12 optionally shifted left by 12, or a relocatable :lo12:-style
  // symbolic expression.
834   bool isAddSubImm() const {
835     if (!isShiftedImm() && !isImm())
836       return false;
837 
838     const MCExpr *Expr;
839 
840     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
841     if (isShiftedImm()) {
842       unsigned Shift = ShiftedImm.ShiftAmount;
843       Expr = ShiftedImm.Val;
844       if (Shift != 0 && Shift != 12)
845         return false;
846     } else {
847       Expr = getImm();
848     }
849 
    // Symbolic operand: accept the relocation kinds ADD/SUB can carry.
850     AArch64MCExpr::VariantKind ELFRefKind;
851     MCSymbolRefExpr::VariantKind DarwinRefKind;
852     int64_t Addend;
853     if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
854                                           DarwinRefKind, Addend)) {
855       return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
856           || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
857           || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
858           || ELFRefKind == AArch64MCExpr::VK_LO12
859           || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
860           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
861           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
862           || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
863           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
864           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
865           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
866           || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
867           || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
868     }
869 
870     // If it's a constant, it should be a real immediate in range.
871     if (auto ShiftedVal = getShiftedVal<12>())
872       return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
873 
874     // If it's an expression, we hope for the best and let the fixup/relocation
875     // code deal with it.
876     return true;
877   }
878 
  // True for a negative constant whose magnitude fits the ADD/SUB uimm12
  // (optionally lsl #12) encoding; the matcher can negate it and flip the
  // opcode.  Unlike isAddSubImm, symbolic expressions are rejected.
879   bool isAddSubImmNeg() const {
880     if (!isShiftedImm() && !isImm())
881       return false;
882 
883     // Otherwise it should be a real negative immediate in range.
884     if (auto ShiftedVal = getShiftedVal<12>())
885       return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
886 
887     return false;
888   }
889 
890   // Signed value in the range -128 to +127. For element widths of
891   // 16 bits or higher it may also be a signed multiple of 256 in the
892   // range -32768 to +32512.
893   // For element-width of 8 bits a range of -128 to 255 is accepted,
894   // since a copy of a byte can be either signed/unsigned.
895   template <typename T>
896   DiagnosticPredicate isSVECpyImm() const {
897     if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
898       return DiagnosticPredicateTy::NoMatch;
899 
900     bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
901                   std::is_same<int8_t, T>::value;
902     if (auto ShiftedImm = getShiftedVal<8>())
903       if (!(IsByte && ShiftedImm->second) &&
904           AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
905                                      << ShiftedImm->second))
906         return DiagnosticPredicateTy::Match;
907 
908     return DiagnosticPredicateTy::NearMatch;
909   }
910 
911   // Unsigned value in the range 0 to 255. For element widths of
912   // 16 bits or higher it may also be a signed multiple of 256 in the
913   // range 0 to 65280.
914   template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
915     if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
916       return DiagnosticPredicateTy::NoMatch;
917 
918     bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
919                   std::is_same<int8_t, T>::value;
920     if (auto ShiftedImm = getShiftedVal<8>())
921       if (!(IsByte && ShiftedImm->second) &&
922           AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
923                                         << ShiftedImm->second))
924         return DiagnosticPredicateTy::Match;
925 
926     return DiagnosticPredicateTy::NearMatch;
927   }
928 
929   template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
930     if (isLogicalImm<T>() && !isSVECpyImm<T>())
931       return DiagnosticPredicateTy::Match;
932     return DiagnosticPredicateTy::NoMatch;
933   }
934 
  // True when this operand was parsed as a condition code (eq, ne, ...).
  bool isCondCode() const { return Kind == k_CondCode; }

  // True when the operand is a constant encodable as an AdvSIMD modified
  // immediate of "type 10" (the 64-bit per-byte-mask form used by MOVI).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  // Branch target with an N-bit signed word offset. A non-constant
  // expression is accepted optimistically and left to the fixup code; a
  // constant must be 4-byte aligned and fit in N bits once the low two
  // (always-zero) bits are dropped.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
959 
960   bool
961   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
962     if (!isImm())
963       return false;
964 
965     AArch64MCExpr::VariantKind ELFRefKind;
966     MCSymbolRefExpr::VariantKind DarwinRefKind;
967     int64_t Addend;
968     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
969                                              DarwinRefKind, Addend)) {
970       return false;
971     }
972     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
973       return false;
974 
975     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
976       if (ELFRefKind == AllowedModifiers[i])
977         return true;
978     }
979 
980     return false;
981   }
982 
  // Symbol operand valid for a MOVZ/MOVK targeting bits [63:48] (:abs_g3: etc).
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  // Symbol operand valid for a MOVZ/MOVK targeting bits [47:32] (:abs_g2: etc).
  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  // Symbol operand valid for a MOVZ/MOVK targeting bits [31:16] (:abs_g1: etc).
  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  // Symbol operand valid for a MOVZ/MOVK targeting bits [15:0] (:abs_g0: etc).
  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1012 
  // Operand usable by the "MOV Rd, #imm" alias that expands to MOVZ with the
  // given Shift for a RegWidth-bit register.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }

  // Operand usable by the "MOV Rd, #imm" alias that expands to MOVN. Unlike
  // the MOVZ alias, a non-constant expression is never accepted.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
1038 
  // FP immediate encodable in the 8-bit FMOV immediate format
  // (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Plain barrier operand (DSB/DMB/ISB), i.e. without the nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS qualifier (DSB ... nXS, FEAT_XS).
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }

  // System register readable via MRS (MRSReg == -1U means "not readable").
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register writable via MSR (MSRReg == -1U means "not writable").
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE fields whose MSR-immediate form takes only a 1-bit value.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  // All remaining PSTATE fields take the full 4-bit immediate; the 1-bit
  // fields above are explicitly excluded to keep the two classes disjoint.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
1075 
  // Any register operand, regardless of register kind.
  bool isReg() const override {
    return Kind == k_Register;
  }

  // General-purpose or FP scalar register (as opposed to vector/SVE kinds).
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the "low" halves of the register
  // file (V0-V15 / their D-register aliases), as required by some indexed
  // multiply forms.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }
1095 
1096   template <unsigned Class> bool isSVEVectorReg() const {
1097     RegKind RK;
1098     switch (Class) {
1099     case AArch64::ZPRRegClassID:
1100     case AArch64::ZPR_3bRegClassID:
1101     case AArch64::ZPR_4bRegClassID:
1102       RK = RegKind::SVEDataVector;
1103       break;
1104     case AArch64::PPRRegClassID:
1105     case AArch64::PPR_3bRegClassID:
1106       RK = RegKind::SVEPredicateVector;
1107       break;
1108     default:
1109       llvm_unreachable("Unsupport register class");
1110     }
1111 
1112     return (Kind == k_Register && Reg.Kind == RK) &&
1113            AArch64MCRegisterClasses[Class].contains(getReg());
1114   }
1115 
  // Scalar FP register used where a Z register is expected (the FPR-as-ZPR
  // aliases); Class selects which FPR register class is acceptable.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  // SVE predicate register whose parsed element-width suffix (.b/.h/.s/.d)
  // matches ElementWidth; NearMatch lets the matcher emit a width diagnostic.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // SVE data register (Z) with a matching element-width suffix; same
  // diagnostic scheme as the predicate variant above.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1142 
  // SVE data register operand that also carries a shift/extend (e.g.
  // "z0.d, lsl #3" in a scatter/gather address). Matches only when both the
  // vector width and the shift/extend type+amount agree with the template
  // parameters; the middle check deliberately returns NoMatch (rather than
  // NearMatch) for a wrong shift on sxtw/uxtw byte addressing so the matcher
  // can fall through to the unscaled addressing form instead.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1165 
  // 64-bit GPR written where the instruction needs its 32-bit half
  // (operand conversion happens in addGPR32as64Operands).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // 32-bit GPR written where the instruction needs the 64-bit register
  // (operand conversion happens in addGPR64as32Operands).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // First register of a consecutive x8-aligned 8-register tuple (used by
  // instructions such as LD64B/ST64B).
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Even/odd 32-bit register pair (e.g. for CASP).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Even/odd 64-bit register pair (e.g. for CASP).
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1193 
  // Rotation immediate for complex-number instructions (FCADD/FCMLA):
  // must be a constant <= 270 that is congruent to Remainder modulo Angle
  // (e.g. Angle=90, Remainder=0 accepts 0/90/180/270).
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  // Scalar register contained in the given 64-bit GPR register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // 64-bit GPR carrying an LSL whose amount equals log2(ExtWidth/8),
  // i.e. a scaled index register operand.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1222 
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  // Vector list whose explicit type suffix (count, element count and element
  // width, e.g. "{ v0.4s, v1.4s }") matches all template parameters.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  // Vector element index (lane number) within [Min, Max] inclusive.
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1254 
  bool isToken() const override { return Kind == k_Token; }

  // Token operand with exactly the given spelling.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A "shifter" is any pure shift operation (no register extends).
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1274 
  // FP immediate that is bit-for-bit equal to the value of table entry
  // ImmEnum (the instruction only accepts that exact constant, e.g. #0.5).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  // Matches either of two exact FP immediates; on failure, propagates the
  // more informative (NearMatch vs NoMatch) result from the second attempt.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1307 
  // Register extend operand (uxtb/sxtb/.../lsl) with amount 0-4, as used by
  // extended-register address and arithmetic forms.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend taking a 64-bit source register: UXTX/SXTX or plain LSL.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // Extend usable for an X-register memory index: LSL/SXTX, with the amount
  // either 0 or exactly log2 of the access size (Width bits).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Extend usable for a W-register memory index: UXTW/SXTW, with the amount
  // either 0 or exactly log2 of the access size (Width bits).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1357 
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1380 
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (NOTE: the original comment here described the 64-bit form; the two
    // comments in this pair were swapped.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    // (NOTE: the original comment here described the 32-bit form; the two
    // comments in this pair were swapped.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1404 
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) by 8 or 16.
    // (Original comment incorrectly called this a "logical vector shifter"
    // doing a left shift; the code checks MSL.)
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1434 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1444 
  // ADRP label operand: a constant must be 4KiB-page-aligned and within the
  // signed 21-bit page range; symbolic expressions are accepted as-is.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  // ADR label operand: a constant must fit in the signed 21-bit byte range;
  // symbolic expressions are accepted as-is.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1476 
  // Append Expr to Inst, folding constants to immediate operands.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }
1486 
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Convert a parsed 64-bit GPR to the 32-bit register with the same
  // encoding (Xn -> Wn) before adding it.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Convert a parsed 32-bit GPR to the 64-bit register with the same
  // encoding (Wn -> Xn) before adding it.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1515 
  // Map a scalar FP register of the given bit Width to the Z register with
  // the same index (e.g. d3 -> z3) and add that.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  // The parser produces Q registers for vectors; translate to the D alias
  // with the same index when a 64-bit vector operand is required.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1549 
  // Row selector for the FirstRegs table in addVectorListOperands below.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  // Add a register-list operand. The list is represented by the MC tuple
  // register whose first element matches the parsed list start; the table
  // maps (register type, list length) to the zeroth tuple register, and
  // column 0 holds the base the parsed start register is relative to.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1578 
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // Encode an exact-FP-immediate pair as a single bit: 0 selects ImmIs0's
  // value, 1 selects ImmIs1's.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1598 
  // Add an (immediate, shift-amount) operand pair. Falls back to the raw
  // expression + parsed shift for non-constant immediates, and to shift 0
  // for plain unshifted immediates.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  // As above but negates the value (used when a SUB alias is encoded as an
  // ADD of the negated immediate, or vice versa). Only constants are legal
  // here — isAddSubImmNeg has already rejected everything else.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }
1628 
  // ADRP encodes a page index, so a constant offset is scaled down by the
  // 4KiB page size; symbolic labels are left for the fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  // ADR offsets are byte offsets, no scaling needed.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  // Unsigned 12-bit offsets are stored scaled by the access size; divide a
  // constant byte offset by Scale. Non-constants go through as expressions.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  // The matcher guarantees a constant here, hence cast<> not dyn_cast<>.
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  // Constant immediate divided by a static scale factor before encoding.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1666 
  // Encode a logical (bitmask) immediate for a T-sized operation into the
  // N:immr:imms field form expected by the instruction encoding.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Same as addLogicalImmOperands but encodes the bitwise complement, for
  // aliases such as BIC that invert the written immediate.
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // Encode a 64-bit constant into the 8-bit AdvSIMD type-10 modified
  // immediate field (one bit per byte of the value).
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1691 
1692   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1693     // Branch operands don't encode the low bits, so shift them off
1694     // here. If it's a label, however, just put it on directly as there's
1695     // not enough information now to do anything.
1696     assert(N == 1 && "Invalid number of operands!");
1697     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1698     if (!MCE) {
1699       addExpr(Inst, getImm());
1700       return;
1701     }
1702     assert(MCE && "Invalid constant immediate operand!");
1703     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1704   }
1705 
1706   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1707     // Branch operands don't encode the low bits, so shift them off
1708     // here. If it's a label, however, just put it on directly as there's
1709     // not enough information now to do anything.
1710     assert(N == 1 && "Invalid number of operands!");
1711     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1712     if (!MCE) {
1713       addExpr(Inst, getImm());
1714       return;
1715     }
1716     assert(MCE && "Invalid constant immediate operand!");
1717     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1718   }
1719 
1720   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1721     // Branch operands don't encode the low bits, so shift them off
1722     // here. If it's a label, however, just put it on directly as there's
1723     // not enough information now to do anything.
1724     assert(N == 1 && "Invalid number of operands!");
1725     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1726     if (!MCE) {
1727       addExpr(Inst, getImm());
1728       return;
1729     }
1730     assert(MCE && "Invalid constant immediate operand!");
1731     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1732   }
1733 
  // Add a floating-point immediate operand, converting the stored bit
  // pattern to its FMOV-style immediate encoding via getFP64Imm.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
1739 
1740   void addBarrierOperands(MCInst &Inst, unsigned N) const {
1741     assert(N == 1 && "Invalid number of operands!");
1742     Inst.addOperand(MCOperand::createImm(getBarrier()));
1743   }
1744 
1745   void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
1746     assert(N == 1 && "Invalid number of operands!");
1747     Inst.addOperand(MCOperand::createImm(getBarrier()));
1748   }
1749 
  // Add a system-register operand using its MRS (read) encoding.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
1755 
  // Add a system-register operand using its MSR (write) encoding.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
1761 
  // Add a PSTATE-field operand for MSR-immediate forms taking a 0/1 value.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1767 
  // Add a PSTATE-field operand for MSR-immediate forms taking a 0..15 value.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1773 
1774   void addSysCROperands(MCInst &Inst, unsigned N) const {
1775     assert(N == 1 && "Invalid number of operands!");
1776     Inst.addOperand(MCOperand::createImm(getSysCR()));
1777   }
1778 
  // Add a prefetch-operation (prfop) operand as its immediate encoding.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
1783 
  // Add a PSB hint operand as its immediate encoding.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1788 
  // Add a BTI hint operand as its immediate encoding (see CreateBTIHint for
  // how the value was packed).
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1793 
1794   void addShifterOperands(MCInst &Inst, unsigned N) const {
1795     assert(N == 1 && "Invalid number of operands!");
1796     unsigned Imm =
1797         AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1798     Inst.addOperand(MCOperand::createImm(Imm));
1799   }
1800 
1801   void addExtendOperands(MCInst &Inst, unsigned N) const {
1802     assert(N == 1 && "Invalid number of operands!");
1803     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1804     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1805     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1806     Inst.addOperand(MCOperand::createImm(Imm));
1807   }
1808 
1809   void addExtend64Operands(MCInst &Inst, unsigned N) const {
1810     assert(N == 1 && "Invalid number of operands!");
1811     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1812     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1813     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1814     Inst.addOperand(MCOperand::createImm(Imm));
1815   }
1816 
  // Add the two immediates of a register-offset memory extend: first whether
  // the extend is signed (SXTW/SXTX), then whether a non-zero shift amount
  // was given (selects the "DoShift" vs "NoShift" instruction form).
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }
1824 
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    // An explicitly written amount (even "#0") selects the "DoShift" form.
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
  }
1836 
1837   template<int Shift>
1838   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1839     assert(N == 1 && "Invalid number of operands!");
1840 
1841     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1842     if (CE) {
1843       uint64_t Value = CE->getValue();
1844       Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1845     } else {
1846       addExpr(Inst, getImm());
1847     }
1848   }
1849 
  // Add the 16-bit chunk of a MOV-alias-of-MOVN operand: the complement of
  // the constant is shifted down by Shift and masked to one halfword. Unlike
  // the MOVZ alias, only constants are accepted here (hence the cast<>).
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
  }
1858 
  // Add a complex-number rotation operand restricted to multiples of 90
  // degrees; the encoding is the rotation divided by 90 (0,90,180,270 ->
  // 0..3).
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
1864 
  // Add a complex-number rotation operand restricted to odd multiples of 90
  // degrees; the encoding maps 90 -> 0 and 270 -> 1 via (value - 90) / 180.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
1870 
1871   void print(raw_ostream &OS) const override;
1872 
1873   static std::unique_ptr<AArch64Operand>
1874   CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1875     auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1876     Op->Tok.Data = Str.data();
1877     Op->Tok.Length = Str.size();
1878     Op->Tok.IsSuffix = IsSuffix;
1879     Op->StartLoc = S;
1880     Op->EndLoc = S;
1881     return Op;
1882   }
1883 
  // Build a register operand. EqTy records how the register must relate to a
  // tied register during matching; the optional shift/extend parameters
  // record a parsed modifier such as "uxtw #2".
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    // No element width for plain registers; CreateVectorReg overwrites this
    // for vector kinds.
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1902 
  // Build a vector register operand (NEON or SVE) by delegating to CreateReg
  // and then recording the element width in bits (0 when unspecified).
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
1917 
1918   static std::unique_ptr<AArch64Operand>
1919   CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1920                    unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1921                    MCContext &Ctx) {
1922     auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
1923     Op->VectorList.RegNum = RegNum;
1924     Op->VectorList.Count = Count;
1925     Op->VectorList.NumElements = NumElements;
1926     Op->VectorList.ElementWidth = ElementWidth;
1927     Op->VectorList.RegisterKind = RegisterKind;
1928     Op->StartLoc = S;
1929     Op->EndLoc = E;
1930     return Op;
1931   }
1932 
1933   static std::unique_ptr<AArch64Operand>
1934   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1935     auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1936     Op->VectorIndex.Val = Idx;
1937     Op->StartLoc = S;
1938     Op->EndLoc = E;
1939     return Op;
1940   }
1941 
1942   static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1943                                                    SMLoc E, MCContext &Ctx) {
1944     auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
1945     Op->Imm.Val = Val;
1946     Op->StartLoc = S;
1947     Op->EndLoc = E;
1948     return Op;
1949   }
1950 
1951   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1952                                                           unsigned ShiftAmount,
1953                                                           SMLoc S, SMLoc E,
1954                                                           MCContext &Ctx) {
1955     auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1956     Op->ShiftedImm .Val = Val;
1957     Op->ShiftedImm.ShiftAmount = ShiftAmount;
1958     Op->StartLoc = S;
1959     Op->EndLoc = E;
1960     return Op;
1961   }
1962 
1963   static std::unique_ptr<AArch64Operand>
1964   CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1965     auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
1966     Op->CondCode.Code = Code;
1967     Op->StartLoc = S;
1968     Op->EndLoc = E;
1969     return Op;
1970   }
1971 
  // Build a floating-point immediate operand. The value is stored as the
  // sign-extended raw bit pattern; IsExact records whether the caller's
  // conversion from the parsed literal was exact.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1981 
  // Build a barrier operand. Val is the immediate encoding, Str its textual
  // name (may be empty), and HasnXSModifier is set for the nXS-qualified
  // barrier forms.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx,
                                                       bool HasnXSModifier) {
    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->Barrier.HasnXSModifier = HasnXSModifier;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1996 
  // Build a system-register operand carrying all three possible encodings:
  // the MRS (read) and MSR (write) register numbers and, where applicable,
  // the PSTATE field for MSR-immediate forms.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2012 
2013   static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2014                                                      SMLoc E, MCContext &Ctx) {
2015     auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2016     Op->SysCRImm.Val = Val;
2017     Op->StartLoc = S;
2018     Op->EndLoc = E;
2019     return Op;
2020   }
2021 
2022   static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2023                                                         StringRef Str,
2024                                                         SMLoc S,
2025                                                         MCContext &Ctx) {
2026     auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2027     Op->Prefetch.Val = Val;
2028     Op->Barrier.Data = Str.data();
2029     Op->Barrier.Length = Str.size();
2030     Op->StartLoc = S;
2031     Op->EndLoc = S;
2032     return Op;
2033   }
2034 
  // Build a PSB hint operand from its encoding and textual name.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2047 
  // Build a BTI hint operand from its encoding and textual name.
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    // Fold the 2-bit BTI operand into the HINT immediate space
    // (#32 + Val*2) — presumably matching the hint aliases; confirm against
    // the BTI encoding tables if this is touched.
    Op->BTIHint.Val = Val << 1 | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2060 
2061   static std::unique_ptr<AArch64Operand>
2062   CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2063                     bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2064     auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2065     Op->ShiftExtend.Type = ShOp;
2066     Op->ShiftExtend.Amount = Val;
2067     Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2068     Op->StartLoc = S;
2069     Op->EndLoc = E;
2070     return Op;
2071   }
2072 };
2073 
2074 } // end anonymous namespace.
2075 
// Debug dump of an operand, dispatched on its kind. Output is a
// human-readable summary for diagnostics, not assembly syntax.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Print the raw bit pattern, flagging values the FP immediate encoding
    // could not represent exactly.
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    // Print the raw register numbers of each list member.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    // If the register carries a shift/extend modifier, fall through and
    // print it with the k_ShiftExtend formatting.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    LLVM_FALLTHROUGH;
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    // "<imp>" marks an implicit (unwritten) amount.
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2153 
2154 /// @name Auto-generated Match Functions
2155 /// {
2156 
2157 static unsigned MatchRegisterName(StringRef Name);
2158 
2159 /// }
2160 
// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
// corresponding Qn register number; returns 0 when the name is not a NEON
// vector register.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2197 
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  // {-1, -1} is the "no match" sentinel checked at the bottom.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1});
    break;
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
    // SVE suffixes only name an element width; the element count depends on
    // the (scalable) vector length and is always reported as 0 here.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  // No case matched: not a valid suffix for this register kind.
  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);
}
2254 
2255 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2256   return parseVectorKind(Suffix, VectorKind).hasValue();
2257 }
2258 
// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
// the corresponding Zn register number; returns 0 when the name does not
// match.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2295 
// Map an SVE predicate register name ("p0".."p15", case-insensitive) to the
// corresponding Pn register number; returns 0 when the name does not match.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2316 
2317 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2318                                      SMLoc &EndLoc) {
2319   return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2320 }
2321 
// Try to parse a scalar register, recording the source range of the token.
OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
                                                        SMLoc &StartLoc,
                                                        SMLoc &EndLoc) {
  StartLoc = getLoc();
  auto Res = tryParseScalarRegister(RegNo);
  // On success the lexer has advanced past the register token, so back up
  // one character to point EndLoc at its last character.
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return Res;
}
2330 
2331 // Matches a register name or register alias previously defined by '.req'
2332 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2333                                                   RegKind Kind) {
2334   unsigned RegNum = 0;
2335   if ((RegNum = matchSVEDataVectorRegName(Name)))
2336     return Kind == RegKind::SVEDataVector ? RegNum : 0;
2337 
2338   if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2339     return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2340 
2341   if ((RegNum = MatchNeonVectorRegName(Name)))
2342     return Kind == RegKind::NeonVector ? RegNum : 0;
2343 
2344   // The parsed register must be of RegKind Scalar
2345   if ((RegNum = MatchRegisterName(Name)))
2346     return Kind == RegKind::Scalar ? RegNum : 0;
2347 
2348   if (!RegNum) {
2349     // Handle a few common aliases of registers.
2350     if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2351                     .Case("fp", AArch64::FP)
2352                     .Case("lr",  AArch64::LR)
2353                     .Case("x31", AArch64::XZR)
2354                     .Case("w31", AArch64::WZR)
2355                     .Default(0))
2356       return Kind == RegKind::Scalar ? RegNum : 0;
2357 
2358     // Check for aliases registered via .req. Canonicalize to lower case.
2359     // That's more consistent since register names are case insensitive, and
2360     // it's how the original entry was passed in from MC/MCParser/AsmParser.
2361     auto Entry = RegisterReqs.find(Name.lower());
2362     if (Entry == RegisterReqs.end())
2363       return 0;
2364 
2365     // set RegNum if the match is the right kind of register
2366     if (Kind == Entry->getValue().first)
2367       RegNum = Entry->getValue().second;
2368   }
2369   return RegNum;
2370 }
2371 
2372 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2373 /// Identifier when called, and if it is a register name the token is eaten and
2374 /// the register is added to the operand list.
2375 OperandMatchResultTy
2376 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2377   MCAsmParser &Parser = getParser();
2378   const AsmToken &Tok = Parser.getTok();
2379   if (Tok.isNot(AsmToken::Identifier))
2380     return MatchOperand_NoMatch;
2381 
2382   std::string lowerCase = Tok.getString().lower();
2383   unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2384   if (Reg == 0)
2385     return MatchOperand_NoMatch;
2386 
2387   RegNum = Reg;
2388   Parser.Lex(); // Eat identifier token.
2389   return MatchOperand_Success;
2390 }
2391 
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
/// Accepts an identifier of the form "cN"/"CN" with 0 <= N <= 15 and pushes
/// a SysCR operand; emits an error and fails otherwise.
OperandMatchResultTy
AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  // Must start with 'c' or 'C'.
  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  // The remainder must be a decimal number in [0, 15].
  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(
      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
  return MatchOperand_Success;
}
2421 
/// tryParsePrefetch - Try to parse a prefetch operand. Accepts either a
/// named prefetch operation or a bare immediate; the IsSVEPrefetch template
/// parameter selects the SVE prfop tables (max value 15) over the base ones
/// (max value 31).
template <bool IsSVEPrefetch>
OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();

  // Name -> encoding, using the table matching IsSVEPrefetch.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return Optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return Optional<unsigned>(Res->Encoding);
    return Optional<unsigned>();
  };

  // Encoding -> canonical name, used to attach a name to immediate forms.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return Optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return Optional<StringRef>(Res->Name);
    return Optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal) {
      TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
               "] expected");
      return MatchOperand_ParseFail;
    }

    // Attach the canonical name when the encoding has one.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, PRFM.getValueOr(""), S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM) {
    TokError("prefetch hint expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2491 
/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command.
/// Only named forms are accepted; the name is looked up in the PSB table.
OperandMatchResultTy
AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
  if (!PSB) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreatePSBHint(
      PSB->Encoding, Tok.getString(), S, getContext()));
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2514 
2515 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2516 OperandMatchResultTy
2517 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2518   MCAsmParser &Parser = getParser();
2519   SMLoc S = getLoc();
2520   const AsmToken &Tok = Parser.getTok();
2521   if (Tok.isNot(AsmToken::Identifier)) {
2522     TokError("invalid operand for instruction");
2523     return MatchOperand_ParseFail;
2524   }
2525 
2526   auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2527   if (!BTI) {
2528     TokError("invalid operand for instruction");
2529     return MatchOperand_ParseFail;
2530   }
2531 
2532   Operands.push_back(AArch64Operand::CreateBTIHint(
2533       BTI->Encoding, Tok.getString(), S, getContext()));
2534   Parser.Lex(); // Eat identifier token.
2535   return MatchOperand_Success;
2536 }
2537 
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
///
/// Accepts an optional leading '#', then a symbolic expression. If the
/// expression carries a relocation modifier, it must be one of the page /
/// gotpage family accepted by ADRP; a bare symbol reference is wrapped in
/// an implicit :abs_page: (VK_ABS_PAGE) modifier.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Immediates may be prefixed with an optional '#'.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Validate which relocation modifier (if any) was attached to the symbol.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references cannot carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2590 
2591 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2592 /// instruction.
2593 OperandMatchResultTy
2594 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2595   SMLoc S = getLoc();
2596   const MCExpr *Expr = nullptr;
2597 
2598   // Leave anything with a bracket to the default for SVE
2599   if (getParser().getTok().is(AsmToken::LBrac))
2600     return MatchOperand_NoMatch;
2601 
2602   if (getParser().getTok().is(AsmToken::Hash))
2603     getParser().Lex(); // Eat hash token.
2604 
2605   if (parseSymbolicImmVal(Expr))
2606     return MatchOperand_ParseFail;
2607 
2608   AArch64MCExpr::VariantKind ELFRefKind;
2609   MCSymbolRefExpr::VariantKind DarwinRefKind;
2610   int64_t Addend;
2611   if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2612     if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2613         ELFRefKind == AArch64MCExpr::VK_INVALID) {
2614       // No modifier was specified at all; this is the syntax for an ELF basic
2615       // ADR relocation (unfortunately).
2616       Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2617     } else {
2618       Error(S, "unexpected adr label");
2619       return MatchOperand_ParseFail;
2620     }
2621   }
2622 
2623   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2624   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2625   return MatchOperand_Success;
2626 }
2627 
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts either a hexadecimal byte (the pre-encoded 8-bit FP immediate,
/// e.g. "#0x70") or a literal real/integer representation (e.g. "#1.0"),
/// each with an optional leading '#' and an optional leading '-'.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this might be some other operand entirely; with
    // one it can only be a (malformed) FP immediate.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The encoded form is a single unsigned byte; an explicit minus sign
    // makes no sense for it.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    // Decode the 8-bit encoding back to its FP value for the operand.
    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Represent +0.0 as the literal token pair "#0" ".0" so instruction
      // forms taking a literal zero can match it.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      // The second argument records whether the conversion was exact
      // (opOK means no rounding occurred).
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2685 
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No comma follows, so there is no shift suffix: emit a plain immediate.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    // 'lsl #0' is a no-op, so emit a plain (unshifted) immediate operand.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2749 
2750 /// parseCondCodeString - Parse a Condition Code string.
2751 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2752   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2753                     .Case("eq", AArch64CC::EQ)
2754                     .Case("ne", AArch64CC::NE)
2755                     .Case("cs", AArch64CC::HS)
2756                     .Case("hs", AArch64CC::HS)
2757                     .Case("cc", AArch64CC::LO)
2758                     .Case("lo", AArch64CC::LO)
2759                     .Case("mi", AArch64CC::MI)
2760                     .Case("pl", AArch64CC::PL)
2761                     .Case("vs", AArch64CC::VS)
2762                     .Case("vc", AArch64CC::VC)
2763                     .Case("hi", AArch64CC::HI)
2764                     .Case("ls", AArch64CC::LS)
2765                     .Case("ge", AArch64CC::GE)
2766                     .Case("lt", AArch64CC::LT)
2767                     .Case("gt", AArch64CC::GT)
2768                     .Case("le", AArch64CC::LE)
2769                     .Case("al", AArch64CC::AL)
2770                     .Case("nv", AArch64CC::NV)
2771                     .Default(AArch64CC::Invalid);
2772 
2773   if (CC == AArch64CC::Invalid &&
2774       getSTI().getFeatureBits()[AArch64::FeatureSVE])
2775     CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2776                     .Case("none",  AArch64CC::EQ)
2777                     .Case("any",   AArch64CC::NE)
2778                     .Case("nlast", AArch64CC::HS)
2779                     .Case("last",  AArch64CC::LO)
2780                     .Case("first", AArch64CC::MI)
2781                     .Case("nfrst", AArch64CC::PL)
2782                     .Case("pmore", AArch64CC::HI)
2783                     .Case("plast", AArch64CC::LS)
2784                     .Case("tcont", AArch64CC::GE)
2785                     .Case("tstop", AArch64CC::LT)
2786                     .Default(AArch64CC::Invalid);
2787 
2788   return CC;
2789 }
2790 
2791 /// parseCondCode - Parse a Condition Code operand.
2792 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2793                                      bool invertCondCode) {
2794   MCAsmParser &Parser = getParser();
2795   SMLoc S = getLoc();
2796   const AsmToken &Tok = Parser.getTok();
2797   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2798 
2799   StringRef Cond = Tok.getString();
2800   AArch64CC::CondCode CC = parseCondCodeString(Cond);
2801   if (CC == AArch64CC::Invalid)
2802     return TokError("invalid condition code");
2803   Parser.Lex(); // Eat identifier token.
2804 
2805   if (invertCondCode) {
2806     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2807       return TokError("condition codes AL and NV are invalid for this instruction");
2808     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2809   }
2810 
2811   Operands.push_back(
2812       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2813   return false;
2814 }
2815 
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // Match the shift/extend mnemonic case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  // The immediate may be prefixed with an optional '#'.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The shift amount must fold to a compile-time constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2889 
// Table mapping architecture-extension names to the subtarget feature bits
// that implement them; used by setRequiredFeatureString below to translate
// feature bits back into user-facing extension names. Entries with an empty
// feature set are names that are recognized but map to no feature here.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2928 
2929 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2930   if (FBS[AArch64::HasV8_1aOps])
2931     Str += "ARMv8.1a";
2932   else if (FBS[AArch64::HasV8_2aOps])
2933     Str += "ARMv8.2a";
2934   else if (FBS[AArch64::HasV8_3aOps])
2935     Str += "ARMv8.3a";
2936   else if (FBS[AArch64::HasV8_4aOps])
2937     Str += "ARMv8.4a";
2938   else if (FBS[AArch64::HasV8_5aOps])
2939     Str += "ARMv8.5a";
2940   else if (FBS[AArch64::HasV8_6aOps])
2941     Str += "ARMv8.6a";
2942   else if (FBS[AArch64::HasV8_7aOps])
2943     Str += "ARMv8.7a";
2944   else {
2945     SmallVector<std::string, 2> ExtMatches;
2946     for (const auto& Ext : ExtensionMap) {
2947       // Use & in case multiple features are enabled
2948       if ((FBS & Ext.Features) != FeatureBitset())
2949         ExtMatches.push_back(Ext.Name);
2950     }
2951     Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
2952   }
2953 }
2954 
/// Expand a SYS-alias encoding into the four operands the SYS instruction
/// takes, appending them to \p Operands in order: op1, Cn, Cm, op2.
///
/// Encoding layout: op2 = bits [2:0], Cm = bits [6:3], Cn = bits [10:7],
/// op1 = bits [13:11].
void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
                                      SMLoc S) {
  const uint16_t Op2 = Encoding & 7;
  const uint16_t Cm = (Encoding & 0x78) >> 3;
  const uint16_t Cn = (Encoding & 0x780) >> 7;
  const uint16_t Op1 = (Encoding & 0x3800) >> 11;

  const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());

  Operands.push_back(
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
  Operands.push_back(
      AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
  Operands.push_back(
      AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
  Expr = MCConstantExpr::create(Op2, getContext());
  Operands.push_back(
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
}
2974 
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Each mnemonic has its own operand-name table; the named operand is looked
/// up, its required subtarget features are checked, and the encoding is
/// expanded into SYS operand fields via createSysAlias. An optional register
/// operand may follow; whether it is required depends on the operand name.
/// Returns true (after reporting an error) on failure.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // All of these aliases assemble to a SYS instruction.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      // Report which features the named operand needs.
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The prediction restriction table only stores the upper encoding bits;
    // op2 is determined by the mnemonic itself.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Operand names containing "all" take no register; all others require one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3072 
/// tryParseBarrierOperand - Parse a barrier operand for DSB/DMB/ISB/TSB:
/// either an immediate in [0,15] or a named barrier option. For "dsb",
/// out-of-table values/names return NoMatch so the nXS variant parser can
/// try the operand instead.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back for the nXS parser.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical name when the encoding has one.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3145 
/// tryParseBarriernXSOperand - Parse the operand of the DSB nXS variant:
/// either one of the immediates 16/20/24/28 or a named nXS barrier option.
OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // Only DSB has an nXS form; anything else reaching here is a parser bug.
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Lookup cannot fail here: all four accepted values are in the table.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3200 
3201 OperandMatchResultTy
3202 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3203   MCAsmParser &Parser = getParser();
3204   const AsmToken &Tok = Parser.getTok();
3205 
3206   if (Tok.isNot(AsmToken::Identifier))
3207     return MatchOperand_NoMatch;
3208 
3209   int MRSReg, MSRReg;
3210   auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3211   if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3212     MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3213     MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3214   } else
3215     MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3216 
3217   auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3218   unsigned PStateImm = -1;
3219   if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3220     PStateImm = PState->Encoding;
3221 
3222   Operands.push_back(
3223       AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3224                                    PStateImm, getContext()));
3225   Parser.Lex(); // Eat identifier
3226 
3227   return MatchOperand_Success;
3228 }
3229 
3230 /// tryParseNeonVectorRegister - Parse a vector register operand.
3231 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3232   MCAsmParser &Parser = getParser();
3233   if (Parser.getTok().isNot(AsmToken::Identifier))
3234     return true;
3235 
3236   SMLoc S = getLoc();
3237   // Check for a vector register specifier first.
3238   StringRef Kind;
3239   unsigned Reg;
3240   OperandMatchResultTy Res =
3241       tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3242   if (Res != MatchOperand_Success)
3243     return true;
3244 
3245   const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3246   if (!KindRes)
3247     return true;
3248 
3249   unsigned ElementWidth = KindRes->second;
3250   Operands.push_back(
3251       AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3252                                       S, getLoc(), getContext()));
3253 
3254   // If there was an explicit qualifier, that goes on as a literal text
3255   // operand.
3256   if (!Kind.empty())
3257     Operands.push_back(
3258         AArch64Operand::CreateToken(Kind, false, S, getContext()));
3259 
3260   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3261 }
3262 
3263 OperandMatchResultTy
3264 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3265   SMLoc SIdx = getLoc();
3266   if (parseOptionalToken(AsmToken::LBrac)) {
3267     const MCExpr *ImmVal;
3268     if (getParser().parseExpression(ImmVal))
3269       return MatchOperand_NoMatch;
3270     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3271     if (!MCE) {
3272       TokError("immediate value expected for vector index");
3273       return MatchOperand_ParseFail;;
3274     }
3275 
3276     SMLoc E = getLoc();
3277 
3278     if (parseToken(AsmToken::RBrac, "']' expected"))
3279       return MatchOperand_ParseFail;;
3280 
3281     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3282                                                          E, getContext()));
3283     return MatchOperand_Success;
3284   }
3285 
3286   return MatchOperand_NoMatch;
3287 }
3288 
3289 // tryParseVectorRegister - Try to parse a vector register name with
3290 // optional kind specifier. If it is a register specifier, eat the token
3291 // and return it.
3292 OperandMatchResultTy
3293 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3294                                          RegKind MatchKind) {
3295   MCAsmParser &Parser = getParser();
3296   const AsmToken &Tok = Parser.getTok();
3297 
3298   if (Tok.isNot(AsmToken::Identifier))
3299     return MatchOperand_NoMatch;
3300 
3301   StringRef Name = Tok.getString();
3302   // If there is a kind specifier, it's separated from the register name by
3303   // a '.'.
3304   size_t Start = 0, Next = Name.find('.');
3305   StringRef Head = Name.slice(Start, Next);
3306   unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3307 
3308   if (RegNum) {
3309     if (Next != StringRef::npos) {
3310       Kind = Name.slice(Next, StringRef::npos);
3311       if (!isValidVectorKind(Kind, MatchKind)) {
3312         TokError("invalid vector kind qualifier");
3313         return MatchOperand_ParseFail;
3314       }
3315     }
3316     Parser.Lex(); // Eat the register token.
3317 
3318     Reg = RegNum;
3319     return MatchOperand_Success;
3320   }
3321 
3322   return MatchOperand_NoMatch;
3323 }
3324 
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
/// Accepts a predicate register (e.g. "p0" or "p0.b") optionally followed by
/// a "/m" (merging) or "/z" (zeroing) qualifier; the slash and qualifier are
/// pushed as separate literal token operands.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  // An unrecognized suffix is treated as "no match" so other operand forms
  // can still be tried, rather than a hard parse failure.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  // Note the suffix check is deliberately deferred until a slash is seen:
  // a plain "p0.b" is fine, only "p0.b/z" is rejected.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging? (Matched case-insensitively via lower().)
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
    AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3377 
3378 /// parseRegister - Parse a register operand.
3379 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3380   // Try for a Neon vector register.
3381   if (!tryParseNeonVectorRegister(Operands))
3382     return false;
3383 
3384   // Otherwise try for a scalar register.
3385   if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3386     return false;
3387 
3388   return true;
3389 }
3390 
/// parseSymbolicImmVal - Parse an immediate expression, optionally prefixed
/// by an ELF-style relocation specifier of the form ":<spec>:" (e.g.
/// ":lo12:sym"). On success \p ImmVal holds the parsed expression — wrapped
/// in an AArch64MCExpr carrying the variant kind when a specifier was
/// present — and false is returned; true is returned after a diagnostic.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Relocation specifiers are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    // The specifier is terminated by a second ':'.
    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation variant survives into emission.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3469 
/// tryParseVectorList - Parse a curly-brace-delimited vector register list,
/// either a range form "{ v0.8b - v3.8b }" or an enumerated form
/// "{ v0.8b, v1.8b, ... }". All registers must share the same kind suffix and
/// be sequential modulo 32; at most 4 registers are allowed. When
/// \p ExpectMatch is false and no vector register follows the '{', the brace
/// is un-lexed so other list operand types (e.g. SVE vs Neon) can be tried.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  // Remember the '{' token so it can be pushed back on a no-match.
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // Range form: "{ vA.<k> - vB.<k> }".
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Distance between the endpoints, wrapping around past register 31.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Enumerated form: "{ vA.<k>, vB.<k>, ... }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3590 
3591 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3592 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3593   auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3594   if (ParseRes != MatchOperand_Success)
3595     return true;
3596 
3597   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3598 }
3599 
3600 OperandMatchResultTy
3601 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3602   SMLoc StartLoc = getLoc();
3603 
3604   unsigned RegNum;
3605   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3606   if (Res != MatchOperand_Success)
3607     return Res;
3608 
3609   if (!parseOptionalToken(AsmToken::Comma)) {
3610     Operands.push_back(AArch64Operand::CreateReg(
3611         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3612     return MatchOperand_Success;
3613   }
3614 
3615   parseOptionalToken(AsmToken::Hash);
3616 
3617   if (getParser().getTok().isNot(AsmToken::Integer)) {
3618     Error(getLoc(), "index must be absent or #0");
3619     return MatchOperand_ParseFail;
3620   }
3621 
3622   const MCExpr *ImmVal;
3623   if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3624       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3625     Error(getLoc(), "index must be absent or #0");
3626     return MatchOperand_ParseFail;
3627   }
3628 
3629   Operands.push_back(AArch64Operand::CreateReg(
3630       RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3631   return MatchOperand_Success;
3632 }
3633 
3634 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3635 OperandMatchResultTy
3636 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3637   SMLoc StartLoc = getLoc();
3638 
3639   unsigned RegNum;
3640   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3641   if (Res != MatchOperand_Success)
3642     return Res;
3643 
3644   // No shift/extend is the default.
3645   if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3646     Operands.push_back(AArch64Operand::CreateReg(
3647         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3648     return MatchOperand_Success;
3649   }
3650 
3651   // Eat the comma
3652   getParser().Lex();
3653 
3654   // Match the shift
3655   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3656   Res = tryParseOptionalShiftExtend(ExtOpnd);
3657   if (Res != MatchOperand_Success)
3658     return Res;
3659 
3660   auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3661   Operands.push_back(AArch64Operand::CreateReg(
3662       RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3663       Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3664       Ext->hasShiftExtendAmount()));
3665 
3666   return MatchOperand_Success;
3667 }
3668 
3669 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3670   MCAsmParser &Parser = getParser();
3671 
3672   // Some SVE instructions have a decoration after the immediate, i.e.
3673   // "mul vl". We parse them here and add tokens, which must be present in the
3674   // asm string in the tablegen instruction.
3675   bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3676   bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3677   if (!Parser.getTok().getString().equals_lower("mul") ||
3678       !(NextIsVL || NextIsHash))
3679     return true;
3680 
3681   Operands.push_back(
3682     AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3683   Parser.Lex(); // Eat the "mul"
3684 
3685   if (NextIsVL) {
3686     Operands.push_back(
3687         AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3688     Parser.Lex(); // Eat the "vl"
3689     return false;
3690   }
3691 
3692   if (NextIsHash) {
3693     Parser.Lex(); // Eat the #
3694     SMLoc S = getLoc();
3695 
3696     // Parse immediate operand.
3697     const MCExpr *ImmVal;
3698     if (!Parser.parseExpression(ImmVal))
3699       if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3700         Operands.push_back(AArch64Operand::CreateImm(
3701             MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3702             getContext()));
3703         return MatchOperand_Success;
3704       }
3705   }
3706 
3707   return Error(getLoc(), "expected 'vl' or '#<imm>'");
3708 }
3709 
3710 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
3711   MCAsmParser &Parser = getParser();
3712   auto Tok = Parser.getTok();
3713   if (Tok.isNot(AsmToken::Identifier))
3714     return true;
3715   Operands.push_back(AArch64Operand::CreateToken(Tok.getString(), false,
3716                                                  Tok.getLoc(), getContext()));
3717   Parser.Lex();
3718   return false;
3719 }
3720 
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
/// \param isCondCode     an identifier operand is parsed as a condition code.
/// \param invertCondCode the parsed condition code is inverted (used by the
///                       cinc/cinv/cneg/cset/csetm aliases).
/// Returns false on success, true after emitting a diagnostic on failure.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // Let tablegen-generated custom operand parsers have the first shot.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocation-annotated)
    // symbolic immediate expression.
    // NOTE(review): these S/E shadow the outer declarations above.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    // Memory operand: emit the '[' as a token operand.
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    // Vector register list.
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // NOTE(review): this relies on the implicit OperandMatchResultTy->bool
    // conversion (Success == 0 -> false, ParseFail != 0 -> true).
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching the asm strings of
      // the fcmp-family instructions.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr r0, =val" pseudo: expand to a movz when the constant fits,
    // otherwise place the value in a constant pool.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Strip trailing zero 16-bit chunks, tracking the required LSL amount.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
          // Rewrite the mnemonic operand in place and append the immediate
          // (plus LSL shift when nonzero) to form a movz.
          Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
          Operands.push_back(AArch64Operand::CreateImm(
                     MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3897 
3898 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
3899   const MCExpr *Expr = nullptr;
3900   SMLoc L = getLoc();
3901   if (check(getParser().parseExpression(Expr), L, "expected expression"))
3902     return true;
3903   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
3904   if (check(!Value, L, "expected constant expression"))
3905     return true;
3906   Out = Value->getValue();
3907   return false;
3908 }
3909 
3910 bool AArch64AsmParser::parseComma() {
3911   if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
3912             "expected comma"))
3913     return true;
3914   // Eat the comma
3915   getParser().Lex();
3916   return false;
3917 }
3918 
/// parseRegisterInRange - Parse a register and check it lies in
/// [\p First, \p Last]; on success \p Out receives the register's index
/// relative to \p Base. FP and LR are accepted as indices 29 and 30 of an
/// X-register range even though their enum values are not contiguous with
/// X0..X28. Returns true (after a diagnostic) on failure.
bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
                                            unsigned First, unsigned Last) {
  unsigned Reg;
  SMLoc Start, End;
  if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
    return true;

  // Special handling for FP and LR; they aren't linearly after x28 in
  // the registers enum.
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      // Range ends at FP (x29): map FP itself, and clamp the linear range
      // check to X28.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      }
    }
    if (Last == AArch64::LR) {
      // Range ends at LR (x30): FP (x29) is also inside it, so map both.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      } else if (Reg == AArch64::LR) {
        Out = 30;
        return false;
      }
    }
  }

  if (check(Reg < First || Reg > RangeEnd, Start,
            Twine("expected register in range ") +
                AArch64InstPrinter::getRegisterName(First) + " to " +
                AArch64InstPrinter::getRegisterName(Last)))
    return true;
  // In the linear part of the range the enum values are contiguous, so the
  // offset from Base is the register's index.
  Out = Reg - Base;
  return false;
}
3957 
3958 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3959                                  const MCParsedAsmOperand &Op2) const {
3960   auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3961   auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3962   if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3963       AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3964     return MCTargetAsmParser::regsEqual(Op1, Op2);
3965 
3966   assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3967          "Testing equality of non-scalar registers not supported");
3968 
3969   // Check if a registers match their sub/super register classes.
3970   if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3971     return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3972   if (AOp1.getRegEqualityTy() == EqualsSubReg)
3973     return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3974   if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3975     return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3976   if (AOp2.getRegEqualityTy() == EqualsSubReg)
3977     return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3978 
3979   return false;
3980 }
3981 
3982 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3983 /// operands.
3984 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3985                                         StringRef Name, SMLoc NameLoc,
3986                                         OperandVector &Operands) {
3987   MCAsmParser &Parser = getParser();
3988   Name = StringSwitch<StringRef>(Name.lower())
3989              .Case("beq", "b.eq")
3990              .Case("bne", "b.ne")
3991              .Case("bhs", "b.hs")
3992              .Case("bcs", "b.cs")
3993              .Case("blo", "b.lo")
3994              .Case("bcc", "b.cc")
3995              .Case("bmi", "b.mi")
3996              .Case("bpl", "b.pl")
3997              .Case("bvs", "b.vs")
3998              .Case("bvc", "b.vc")
3999              .Case("bhi", "b.hi")
4000              .Case("bls", "b.ls")
4001              .Case("bge", "b.ge")
4002              .Case("blt", "b.lt")
4003              .Case("bgt", "b.gt")
4004              .Case("ble", "b.le")
4005              .Case("bal", "b.al")
4006              .Case("bnv", "b.nv")
4007              .Default(Name);
4008 
4009   // First check for the AArch64-specific .req directive.
4010   if (Parser.getTok().is(AsmToken::Identifier) &&
4011       Parser.getTok().getIdentifier().lower() == ".req") {
4012     parseDirectiveReq(Name, NameLoc);
4013     // We always return 'error' for this, as we're done with this
4014     // statement and don't need to match the 'instruction."
4015     return true;
4016   }
4017 
4018   // Create the leading tokens for the mnemonic, split by '.' characters.
4019   size_t Start = 0, Next = Name.find('.');
4020   StringRef Head = Name.slice(Start, Next);
4021 
4022   // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
4023   // the SYS instruction.
4024   if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
4025       Head == "cfp" || Head == "dvp" || Head == "cpp")
4026     return parseSysAlias(Head, NameLoc, Operands);
4027 
4028   Operands.push_back(
4029       AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
4030   Mnemonic = Head;
4031 
4032   // Handle condition codes for a branch mnemonic
4033   if (Head == "b" && Next != StringRef::npos) {
4034     Start = Next;
4035     Next = Name.find('.', Start + 1);
4036     Head = Name.slice(Start + 1, Next);
4037 
4038     SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4039                                             (Head.data() - Name.data()));
4040     AArch64CC::CondCode CC = parseCondCodeString(Head);
4041     if (CC == AArch64CC::Invalid)
4042       return Error(SuffixLoc, "invalid condition code");
4043     Operands.push_back(
4044         AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
4045     Operands.push_back(
4046         AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4047   }
4048 
4049   // Add the remaining tokens in the mnemonic.
4050   while (Next != StringRef::npos) {
4051     Start = Next;
4052     Next = Name.find('.', Start + 1);
4053     Head = Name.slice(Start, Next);
4054     SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
4055                                             (Head.data() - Name.data()) + 1);
4056     Operands.push_back(
4057         AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
4058   }
4059 
4060   // Conditional compare instructions have a Condition Code operand, which needs
4061   // to be parsed and an immediate operand created.
4062   bool condCodeFourthOperand =
4063       (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
4064        Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
4065        Head == "csinc" || Head == "csinv" || Head == "csneg");
4066 
4067   // These instructions are aliases to some of the conditional select
4068   // instructions. However, the condition code is inverted in the aliased
4069   // instruction.
4070   //
4071   // FIXME: Is this the correct way to handle these? Or should the parser
4072   //        generate the aliased instructions directly?
4073   bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
4074   bool condCodeThirdOperand =
4075       (Head == "cinc" || Head == "cinv" || Head == "cneg");
4076 
4077   // Read the remaining operands.
4078   if (getLexer().isNot(AsmToken::EndOfStatement)) {
4079 
4080     unsigned N = 1;
4081     do {
4082       // Parse and remember the operand.
4083       if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
4084                                      (N == 3 && condCodeThirdOperand) ||
4085                                      (N == 2 && condCodeSecondOperand),
4086                        condCodeSecondOperand || condCodeThirdOperand)) {
4087         return true;
4088       }
4089 
4090       // After successfully parsing some operands there are two special cases to
4091       // consider (i.e. notional operands not separated by commas). Both are due
4092       // to memory specifiers:
4093       //  + An RBrac will end an address for load/store/prefetch
4094       //  + An '!' will indicate a pre-indexed operation.
4095       //
4096       // It's someone else's responsibility to make sure these tokens are sane
4097       // in the given context!
4098 
4099       SMLoc RLoc = Parser.getTok().getLoc();
4100       if (parseOptionalToken(AsmToken::RBrac))
4101         Operands.push_back(
4102             AArch64Operand::CreateToken("]", false, RLoc, getContext()));
4103       SMLoc ELoc = Parser.getTok().getLoc();
4104       if (parseOptionalToken(AsmToken::Exclaim))
4105         Operands.push_back(
4106             AArch64Operand::CreateToken("!", false, ELoc, getContext()));
4107 
4108       ++N;
4109     } while (parseOptionalToken(AsmToken::Comma));
4110   }
4111 
4112   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4113     return true;
4114 
4115   return false;
4116 }
4117 
4118 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4119   assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4120   return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4121          (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4122          (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4123          (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4124          (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4125          (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4126 }
4127 
4128 // FIXME: This entire function is a giant hack to provide us with decent
4129 // operand range validation/diagnostics until TableGen/MC can be extended
4130 // to support autogeneration of this kind of validation.
4131 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4132                                            SmallVectorImpl<SMLoc> &Loc) {
4133   const MCRegisterInfo *RI = getContext().getRegisterInfo();
4134   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4135 
4136   // A prefix only applies to the instruction following it.  Here we extract
4137   // prefix information for the next instruction before validating the current
4138   // one so that in the case of failure we don't erronously continue using the
4139   // current prefix.
4140   PrefixInfo Prefix = NextPrefix;
4141   NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4142 
4143   // Before validating the instruction in isolation we run through the rules
4144   // applicable when it follows a prefix instruction.
4145   // NOTE: brk & hlt can be prefixed but require no additional validation.
4146   if (Prefix.isActive() &&
4147       (Inst.getOpcode() != AArch64::BRK) &&
4148       (Inst.getOpcode() != AArch64::HLT)) {
4149 
4150     // Prefixed intructions must have a destructive operand.
4151     if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4152         AArch64::NotDestructive)
4153       return Error(IDLoc, "instruction is unpredictable when following a"
4154                    " movprfx, suggest replacing movprfx with mov");
4155 
4156     // Destination operands must match.
4157     if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4158       return Error(Loc[0], "instruction is unpredictable when following a"
4159                    " movprfx writing to a different destination");
4160 
4161     // Destination operand must not be used in any other location.
4162     for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4163       if (Inst.getOperand(i).isReg() &&
4164           (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4165           isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4166         return Error(Loc[0], "instruction is unpredictable when following a"
4167                      " movprfx and destination also used as non-destructive"
4168                      " source");
4169     }
4170 
4171     auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4172     if (Prefix.isPredicated()) {
4173       int PgIdx = -1;
4174 
4175       // Find the instructions general predicate.
4176       for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4177         if (Inst.getOperand(i).isReg() &&
4178             PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4179           PgIdx = i;
4180           break;
4181         }
4182 
4183       // Instruction must be predicated if the movprfx is predicated.
4184       if (PgIdx == -1 ||
4185           (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4186         return Error(IDLoc, "instruction is unpredictable when following a"
4187                      " predicated movprfx, suggest using unpredicated movprfx");
4188 
4189       // Instruction must use same general predicate as the movprfx.
4190       if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4191         return Error(IDLoc, "instruction is unpredictable when following a"
4192                      " predicated movprfx using a different general predicate");
4193 
4194       // Instruction element type must match the movprfx.
4195       if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4196         return Error(IDLoc, "instruction is unpredictable when following a"
4197                      " predicated movprfx with a different element size");
4198     }
4199   }
4200 
4201   // Check for indexed addressing modes w/ the base register being the
4202   // same as a destination/source register or pair load where
4203   // the Rt == Rt2. All of those are undefined behaviour.
4204   switch (Inst.getOpcode()) {
4205   case AArch64::LDPSWpre:
4206   case AArch64::LDPWpost:
4207   case AArch64::LDPWpre:
4208   case AArch64::LDPXpost:
4209   case AArch64::LDPXpre: {
4210     unsigned Rt = Inst.getOperand(1).getReg();
4211     unsigned Rt2 = Inst.getOperand(2).getReg();
4212     unsigned Rn = Inst.getOperand(3).getReg();
4213     if (RI->isSubRegisterEq(Rn, Rt))
4214       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4215                            "is also a destination");
4216     if (RI->isSubRegisterEq(Rn, Rt2))
4217       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4218                            "is also a destination");
4219     LLVM_FALLTHROUGH;
4220   }
4221   case AArch64::LDPDi:
4222   case AArch64::LDPQi:
4223   case AArch64::LDPSi:
4224   case AArch64::LDPSWi:
4225   case AArch64::LDPWi:
4226   case AArch64::LDPXi: {
4227     unsigned Rt = Inst.getOperand(0).getReg();
4228     unsigned Rt2 = Inst.getOperand(1).getReg();
4229     if (Rt == Rt2)
4230       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4231     break;
4232   }
4233   case AArch64::LDPDpost:
4234   case AArch64::LDPDpre:
4235   case AArch64::LDPQpost:
4236   case AArch64::LDPQpre:
4237   case AArch64::LDPSpost:
4238   case AArch64::LDPSpre:
4239   case AArch64::LDPSWpost: {
4240     unsigned Rt = Inst.getOperand(1).getReg();
4241     unsigned Rt2 = Inst.getOperand(2).getReg();
4242     if (Rt == Rt2)
4243       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4244     break;
4245   }
4246   case AArch64::STPDpost:
4247   case AArch64::STPDpre:
4248   case AArch64::STPQpost:
4249   case AArch64::STPQpre:
4250   case AArch64::STPSpost:
4251   case AArch64::STPSpre:
4252   case AArch64::STPWpost:
4253   case AArch64::STPWpre:
4254   case AArch64::STPXpost:
4255   case AArch64::STPXpre: {
4256     unsigned Rt = Inst.getOperand(1).getReg();
4257     unsigned Rt2 = Inst.getOperand(2).getReg();
4258     unsigned Rn = Inst.getOperand(3).getReg();
4259     if (RI->isSubRegisterEq(Rn, Rt))
4260       return Error(Loc[0], "unpredictable STP instruction, writeback base "
4261                            "is also a source");
4262     if (RI->isSubRegisterEq(Rn, Rt2))
4263       return Error(Loc[1], "unpredictable STP instruction, writeback base "
4264                            "is also a source");
4265     break;
4266   }
4267   case AArch64::LDRBBpre:
4268   case AArch64::LDRBpre:
4269   case AArch64::LDRHHpre:
4270   case AArch64::LDRHpre:
4271   case AArch64::LDRSBWpre:
4272   case AArch64::LDRSBXpre:
4273   case AArch64::LDRSHWpre:
4274   case AArch64::LDRSHXpre:
4275   case AArch64::LDRSWpre:
4276   case AArch64::LDRWpre:
4277   case AArch64::LDRXpre:
4278   case AArch64::LDRBBpost:
4279   case AArch64::LDRBpost:
4280   case AArch64::LDRHHpost:
4281   case AArch64::LDRHpost:
4282   case AArch64::LDRSBWpost:
4283   case AArch64::LDRSBXpost:
4284   case AArch64::LDRSHWpost:
4285   case AArch64::LDRSHXpost:
4286   case AArch64::LDRSWpost:
4287   case AArch64::LDRWpost:
4288   case AArch64::LDRXpost: {
4289     unsigned Rt = Inst.getOperand(1).getReg();
4290     unsigned Rn = Inst.getOperand(2).getReg();
4291     if (RI->isSubRegisterEq(Rn, Rt))
4292       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4293                            "is also a source");
4294     break;
4295   }
4296   case AArch64::STRBBpost:
4297   case AArch64::STRBpost:
4298   case AArch64::STRHHpost:
4299   case AArch64::STRHpost:
4300   case AArch64::STRWpost:
4301   case AArch64::STRXpost:
4302   case AArch64::STRBBpre:
4303   case AArch64::STRBpre:
4304   case AArch64::STRHHpre:
4305   case AArch64::STRHpre:
4306   case AArch64::STRWpre:
4307   case AArch64::STRXpre: {
4308     unsigned Rt = Inst.getOperand(1).getReg();
4309     unsigned Rn = Inst.getOperand(2).getReg();
4310     if (RI->isSubRegisterEq(Rn, Rt))
4311       return Error(Loc[0], "unpredictable STR instruction, writeback base "
4312                            "is also a source");
4313     break;
4314   }
4315   case AArch64::STXRB:
4316   case AArch64::STXRH:
4317   case AArch64::STXRW:
4318   case AArch64::STXRX:
4319   case AArch64::STLXRB:
4320   case AArch64::STLXRH:
4321   case AArch64::STLXRW:
4322   case AArch64::STLXRX: {
4323     unsigned Rs = Inst.getOperand(0).getReg();
4324     unsigned Rt = Inst.getOperand(1).getReg();
4325     unsigned Rn = Inst.getOperand(2).getReg();
4326     if (RI->isSubRegisterEq(Rt, Rs) ||
4327         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4328       return Error(Loc[0],
4329                    "unpredictable STXR instruction, status is also a source");
4330     break;
4331   }
4332   case AArch64::STXPW:
4333   case AArch64::STXPX:
4334   case AArch64::STLXPW:
4335   case AArch64::STLXPX: {
4336     unsigned Rs = Inst.getOperand(0).getReg();
4337     unsigned Rt1 = Inst.getOperand(1).getReg();
4338     unsigned Rt2 = Inst.getOperand(2).getReg();
4339     unsigned Rn = Inst.getOperand(3).getReg();
4340     if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4341         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4342       return Error(Loc[0],
4343                    "unpredictable STXP instruction, status is also a source");
4344     break;
4345   }
4346   case AArch64::LDRABwriteback:
4347   case AArch64::LDRAAwriteback: {
4348     unsigned Xt = Inst.getOperand(0).getReg();
4349     unsigned Xn = Inst.getOperand(1).getReg();
4350     if (Xt == Xn)
4351       return Error(Loc[0],
4352           "unpredictable LDRA instruction, writeback base"
4353           " is also a destination");
4354     break;
4355   }
4356   }
4357 
4358 
4359   // Now check immediate ranges. Separate from the above as there is overlap
4360   // in the instructions being checked and this keeps the nested conditionals
4361   // to a minimum.
4362   switch (Inst.getOpcode()) {
4363   case AArch64::ADDSWri:
4364   case AArch64::ADDSXri:
4365   case AArch64::ADDWri:
4366   case AArch64::ADDXri:
4367   case AArch64::SUBSWri:
4368   case AArch64::SUBSXri:
4369   case AArch64::SUBWri:
4370   case AArch64::SUBXri: {
4371     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4372     // some slight duplication here.
4373     if (Inst.getOperand(2).isExpr()) {
4374       const MCExpr *Expr = Inst.getOperand(2).getExpr();
4375       AArch64MCExpr::VariantKind ELFRefKind;
4376       MCSymbolRefExpr::VariantKind DarwinRefKind;
4377       int64_t Addend;
4378       if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4379 
4380         // Only allow these with ADDXri.
4381         if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4382              DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4383             Inst.getOpcode() == AArch64::ADDXri)
4384           return false;
4385 
4386         // Only allow these with ADDXri/ADDWri
4387         if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4388              ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4389              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4390              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4391              ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4392              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4393              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4394              ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4395              ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4396              ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4397             (Inst.getOpcode() == AArch64::ADDXri ||
4398              Inst.getOpcode() == AArch64::ADDWri))
4399           return false;
4400 
4401         // Don't allow symbol refs in the immediate field otherwise
4402         // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4403         // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4404         // 'cmp w0, 'borked')
4405         return Error(Loc.back(), "invalid immediate expression");
4406       }
4407       // We don't validate more complex expressions here
4408     }
4409     return false;
4410   }
4411   default:
4412     return false;
4413   }
4414 }
4415 
4416 static std::string AArch64MnemonicSpellCheck(StringRef S,
4417                                              const FeatureBitset &FBS,
4418                                              unsigned VariantID = 0);
4419 
// Report a human-readable diagnostic at Loc for a failed instruction match.
// ErrCode is one of the Match_* codes produced by the TableGen-generated
// matcher; ErrorInfo indexes into Operands for errors tied to a specific
// operand (used by Match_InvalidTiedOperand). Every case returns the result
// of Error(), so this function always returns true.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // The wording depends on how the tied operand must relate to the
    // destination register (same reg, its 32-bit, or its 64-bit form).
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-indexed immediate diagnostics: signed/unsigned immediates of
  // various widths and scale factors, named SImm/UImm<bits> accordingly.
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Unknown mnemonic: append a "did you mean ...?" suggestion computed
    // from the mnemonics available under the current feature set.
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  // SVE scatter/gather addressing-mode diagnostics (ZPR base with
  // extend/shift modifiers).
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPredicate3bBReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
  case Match_InvalidSVEPredicate3bHReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
  case Match_InvalidSVEPredicate3bSReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
4741 
4742 static const char *getSubtargetFeatureName(uint64_t Val);
4743 
4744 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4745                                                OperandVector &Operands,
4746                                                MCStreamer &Out,
4747                                                uint64_t &ErrorInfo,
4748                                                bool MatchingInlineAsm) {
4749   assert(!Operands.empty() && "Unexpect empty operand list!");
4750   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4751   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4752 
4753   StringRef Tok = Op.getToken();
4754   unsigned NumOperands = Operands.size();
4755 
4756   if (NumOperands == 4 && Tok == "lsl") {
4757     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4758     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4759     if (Op2.isScalarReg() && Op3.isImm()) {
4760       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4761       if (Op3CE) {
4762         uint64_t Op3Val = Op3CE->getValue();
4763         uint64_t NewOp3Val = 0;
4764         uint64_t NewOp4Val = 0;
4765         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4766                 Op2.getReg())) {
4767           NewOp3Val = (32 - Op3Val) & 0x1f;
4768           NewOp4Val = 31 - Op3Val;
4769         } else {
4770           NewOp3Val = (64 - Op3Val) & 0x3f;
4771           NewOp4Val = 63 - Op3Val;
4772         }
4773 
4774         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4775         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4776 
4777         Operands[0] = AArch64Operand::CreateToken(
4778             "ubfm", false, Op.getStartLoc(), getContext());
4779         Operands.push_back(AArch64Operand::CreateImm(
4780             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4781         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4782                                                 Op3.getEndLoc(), getContext());
4783       }
4784     }
4785   } else if (NumOperands == 4 && Tok == "bfc") {
4786     // FIXME: Horrible hack to handle BFC->BFM alias.
4787     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4788     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4789     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4790 
4791     if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4792       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4793       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4794 
4795       if (LSBCE && WidthCE) {
4796         uint64_t LSB = LSBCE->getValue();
4797         uint64_t Width = WidthCE->getValue();
4798 
4799         uint64_t RegWidth = 0;
4800         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4801                 Op1.getReg()))
4802           RegWidth = 64;
4803         else
4804           RegWidth = 32;
4805 
4806         if (LSB >= RegWidth)
4807           return Error(LSBOp.getStartLoc(),
4808                        "expected integer in range [0, 31]");
4809         if (Width < 1 || Width > RegWidth)
4810           return Error(WidthOp.getStartLoc(),
4811                        "expected integer in range [1, 32]");
4812 
4813         uint64_t ImmR = 0;
4814         if (RegWidth == 32)
4815           ImmR = (32 - LSB) & 0x1f;
4816         else
4817           ImmR = (64 - LSB) & 0x3f;
4818 
4819         uint64_t ImmS = Width - 1;
4820 
4821         if (ImmR != 0 && ImmS >= ImmR)
4822           return Error(WidthOp.getStartLoc(),
4823                        "requested insert overflows register");
4824 
4825         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4826         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4827         Operands[0] = AArch64Operand::CreateToken(
4828               "bfm", false, Op.getStartLoc(), getContext());
4829         Operands[2] = AArch64Operand::CreateReg(
4830             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4831             SMLoc(), SMLoc(), getContext());
4832         Operands[3] = AArch64Operand::CreateImm(
4833             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4834         Operands.emplace_back(
4835             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4836                                       WidthOp.getEndLoc(), getContext()));
4837       }
4838     }
4839   } else if (NumOperands == 5) {
4840     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4841     // UBFIZ -> UBFM aliases.
4842     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4843       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4844       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4845       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4846 
4847       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4848         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4849         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4850 
4851         if (Op3CE && Op4CE) {
4852           uint64_t Op3Val = Op3CE->getValue();
4853           uint64_t Op4Val = Op4CE->getValue();
4854 
4855           uint64_t RegWidth = 0;
4856           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4857                   Op1.getReg()))
4858             RegWidth = 64;
4859           else
4860             RegWidth = 32;
4861 
4862           if (Op3Val >= RegWidth)
4863             return Error(Op3.getStartLoc(),
4864                          "expected integer in range [0, 31]");
4865           if (Op4Val < 1 || Op4Val > RegWidth)
4866             return Error(Op4.getStartLoc(),
4867                          "expected integer in range [1, 32]");
4868 
4869           uint64_t NewOp3Val = 0;
4870           if (RegWidth == 32)
4871             NewOp3Val = (32 - Op3Val) & 0x1f;
4872           else
4873             NewOp3Val = (64 - Op3Val) & 0x3f;
4874 
4875           uint64_t NewOp4Val = Op4Val - 1;
4876 
4877           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4878             return Error(Op4.getStartLoc(),
4879                          "requested insert overflows register");
4880 
4881           const MCExpr *NewOp3 =
4882               MCConstantExpr::create(NewOp3Val, getContext());
4883           const MCExpr *NewOp4 =
4884               MCConstantExpr::create(NewOp4Val, getContext());
4885           Operands[3] = AArch64Operand::CreateImm(
4886               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4887           Operands[4] = AArch64Operand::CreateImm(
4888               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4889           if (Tok == "bfi")
4890             Operands[0] = AArch64Operand::CreateToken(
4891                 "bfm", false, Op.getStartLoc(), getContext());
4892           else if (Tok == "sbfiz")
4893             Operands[0] = AArch64Operand::CreateToken(
4894                 "sbfm", false, Op.getStartLoc(), getContext());
4895           else if (Tok == "ubfiz")
4896             Operands[0] = AArch64Operand::CreateToken(
4897                 "ubfm", false, Op.getStartLoc(), getContext());
4898           else
4899             llvm_unreachable("No valid mnemonic for alias?");
4900         }
4901       }
4902 
4903       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4904       // UBFX -> UBFM aliases.
4905     } else if (NumOperands == 5 &&
4906                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4907       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4908       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4909       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4910 
4911       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4912         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4913         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4914 
4915         if (Op3CE && Op4CE) {
4916           uint64_t Op3Val = Op3CE->getValue();
4917           uint64_t Op4Val = Op4CE->getValue();
4918 
4919           uint64_t RegWidth = 0;
4920           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4921                   Op1.getReg()))
4922             RegWidth = 64;
4923           else
4924             RegWidth = 32;
4925 
4926           if (Op3Val >= RegWidth)
4927             return Error(Op3.getStartLoc(),
4928                          "expected integer in range [0, 31]");
4929           if (Op4Val < 1 || Op4Val > RegWidth)
4930             return Error(Op4.getStartLoc(),
4931                          "expected integer in range [1, 32]");
4932 
4933           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4934 
4935           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4936             return Error(Op4.getStartLoc(),
4937                          "requested extract overflows register");
4938 
4939           const MCExpr *NewOp4 =
4940               MCConstantExpr::create(NewOp4Val, getContext());
4941           Operands[4] = AArch64Operand::CreateImm(
4942               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4943           if (Tok == "bfxil")
4944             Operands[0] = AArch64Operand::CreateToken(
4945                 "bfm", false, Op.getStartLoc(), getContext());
4946           else if (Tok == "sbfx")
4947             Operands[0] = AArch64Operand::CreateToken(
4948                 "sbfm", false, Op.getStartLoc(), getContext());
4949           else if (Tok == "ubfx")
4950             Operands[0] = AArch64Operand::CreateToken(
4951                 "ubfm", false, Op.getStartLoc(), getContext());
4952           else
4953             llvm_unreachable("No valid mnemonic for alias?");
4954         }
4955       }
4956     }
4957   }
4958 
4959   // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4960   // instruction for FP registers correctly in some rare circumstances. Convert
4961   // it to a safe instruction and warn (because silently changing someone's
4962   // assembly is rude).
4963   if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4964       NumOperands == 4 && Tok == "movi") {
4965     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4966     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4967     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4968     if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4969         (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4970       StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4971       if (Suffix.lower() == ".2d" &&
4972           cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4973         Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4974                 " correctly on this CPU, converting to equivalent movi.16b");
4975         // Switch the suffix to .16b.
4976         unsigned Idx = Op1.isToken() ? 1 : 2;
4977         Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4978                                                   getContext());
4979       }
4980     }
4981   }
4982 
4983   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4984   //        InstAlias can't quite handle this since the reg classes aren't
4985   //        subclasses.
4986   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4987     // The source register can be Wn here, but the matcher expects a
4988     // GPR64. Twiddle it here if necessary.
4989     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4990     if (Op.isScalarReg()) {
4991       unsigned Reg = getXRegFromWReg(Op.getReg());
4992       Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4993                                               Op.getStartLoc(), Op.getEndLoc(),
4994                                               getContext());
4995     }
4996   }
4997   // FIXME: Likewise for sxt[bh] with a Xd dst operand
4998   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4999     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5000     if (Op.isScalarReg() &&
5001         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5002             Op.getReg())) {
5003       // The source register can be Wn here, but the matcher expects a
5004       // GPR64. Twiddle it here if necessary.
5005       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5006       if (Op.isScalarReg()) {
5007         unsigned Reg = getXRegFromWReg(Op.getReg());
5008         Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5009                                                 Op.getStartLoc(),
5010                                                 Op.getEndLoc(), getContext());
5011       }
5012     }
5013   }
5014   // FIXME: Likewise for uxt[bh] with a Xd dst operand
5015   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5016     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5017     if (Op.isScalarReg() &&
5018         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5019             Op.getReg())) {
5020       // The source register can be Wn here, but the matcher expects a
5021       // GPR32. Twiddle it here if necessary.
5022       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5023       if (Op.isScalarReg()) {
5024         unsigned Reg = getWRegFromXReg(Op.getReg());
5025         Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5026                                                 Op.getStartLoc(),
5027                                                 Op.getEndLoc(), getContext());
5028       }
5029     }
5030   }
5031 
5032   MCInst Inst;
5033   FeatureBitset MissingFeatures;
5034   // First try to match against the secondary set of tables containing the
5035   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5036   unsigned MatchResult =
5037       MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5038                            MatchingInlineAsm, 1);
5039 
5040   // If that fails, try against the alternate table containing long-form NEON:
5041   // "fadd v0.2s, v1.2s, v2.2s"
5042   if (MatchResult != Match_Success) {
5043     // But first, save the short-form match result: we can use it in case the
5044     // long-form match also fails.
5045     auto ShortFormNEONErrorInfo = ErrorInfo;
5046     auto ShortFormNEONMatchResult = MatchResult;
5047     auto ShortFormNEONMissingFeatures = MissingFeatures;
5048 
5049     MatchResult =
5050         MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5051                              MatchingInlineAsm, 0);
5052 
5053     // Now, both matches failed, and the long-form match failed on the mnemonic
5054     // suffix token operand.  The short-form match failure is probably more
5055     // relevant: use it instead.
5056     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5057         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5058         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5059       MatchResult = ShortFormNEONMatchResult;
5060       ErrorInfo = ShortFormNEONErrorInfo;
5061       MissingFeatures = ShortFormNEONMissingFeatures;
5062     }
5063   }
5064 
5065   switch (MatchResult) {
5066   case Match_Success: {
5067     // Perform range checking and other semantic validations
5068     SmallVector<SMLoc, 8> OperandLocs;
5069     NumOperands = Operands.size();
5070     for (unsigned i = 1; i < NumOperands; ++i)
5071       OperandLocs.push_back(Operands[i]->getStartLoc());
5072     if (validateInstruction(Inst, IDLoc, OperandLocs))
5073       return true;
5074 
5075     Inst.setLoc(IDLoc);
5076     Out.emitInstruction(Inst, getSTI());
5077     return false;
5078   }
5079   case Match_MissingFeature: {
5080     assert(MissingFeatures.any() && "Unknown missing feature!");
5081     // Special case the error message for the very common case where only
5082     // a single subtarget feature is missing (neon, e.g.).
5083     std::string Msg = "instruction requires:";
5084     for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5085       if (MissingFeatures[i]) {
5086         Msg += " ";
5087         Msg += getSubtargetFeatureName(i);
5088       }
5089     }
5090     return Error(IDLoc, Msg);
5091   }
5092   case Match_MnemonicFail:
5093     return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5094   case Match_InvalidOperand: {
5095     SMLoc ErrorLoc = IDLoc;
5096 
5097     if (ErrorInfo != ~0ULL) {
5098       if (ErrorInfo >= Operands.size())
5099         return Error(IDLoc, "too few operands for instruction",
5100                      SMRange(IDLoc, getTok().getLoc()));
5101 
5102       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5103       if (ErrorLoc == SMLoc())
5104         ErrorLoc = IDLoc;
5105     }
5106     // If the match failed on a suffix token operand, tweak the diagnostic
5107     // accordingly.
5108     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5109         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5110       MatchResult = Match_InvalidSuffix;
5111 
5112     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5113   }
5114   case Match_InvalidTiedOperand:
5115   case Match_InvalidMemoryIndexed1:
5116   case Match_InvalidMemoryIndexed2:
5117   case Match_InvalidMemoryIndexed4:
5118   case Match_InvalidMemoryIndexed8:
5119   case Match_InvalidMemoryIndexed16:
5120   case Match_InvalidCondCode:
5121   case Match_AddSubRegExtendSmall:
5122   case Match_AddSubRegExtendLarge:
5123   case Match_AddSubSecondSource:
5124   case Match_LogicalSecondSource:
5125   case Match_AddSubRegShift32:
5126   case Match_AddSubRegShift64:
5127   case Match_InvalidMovImm32Shift:
5128   case Match_InvalidMovImm64Shift:
5129   case Match_InvalidFPImm:
5130   case Match_InvalidMemoryWExtend8:
5131   case Match_InvalidMemoryWExtend16:
5132   case Match_InvalidMemoryWExtend32:
5133   case Match_InvalidMemoryWExtend64:
5134   case Match_InvalidMemoryWExtend128:
5135   case Match_InvalidMemoryXExtend8:
5136   case Match_InvalidMemoryXExtend16:
5137   case Match_InvalidMemoryXExtend32:
5138   case Match_InvalidMemoryXExtend64:
5139   case Match_InvalidMemoryXExtend128:
5140   case Match_InvalidMemoryIndexed1SImm4:
5141   case Match_InvalidMemoryIndexed2SImm4:
5142   case Match_InvalidMemoryIndexed3SImm4:
5143   case Match_InvalidMemoryIndexed4SImm4:
5144   case Match_InvalidMemoryIndexed1SImm6:
5145   case Match_InvalidMemoryIndexed16SImm4:
5146   case Match_InvalidMemoryIndexed32SImm4:
5147   case Match_InvalidMemoryIndexed4SImm7:
5148   case Match_InvalidMemoryIndexed8SImm7:
5149   case Match_InvalidMemoryIndexed16SImm7:
5150   case Match_InvalidMemoryIndexed8UImm5:
5151   case Match_InvalidMemoryIndexed4UImm5:
5152   case Match_InvalidMemoryIndexed2UImm5:
5153   case Match_InvalidMemoryIndexed1UImm6:
5154   case Match_InvalidMemoryIndexed2UImm6:
5155   case Match_InvalidMemoryIndexed4UImm6:
5156   case Match_InvalidMemoryIndexed8UImm6:
5157   case Match_InvalidMemoryIndexed16UImm6:
5158   case Match_InvalidMemoryIndexedSImm6:
5159   case Match_InvalidMemoryIndexedSImm5:
5160   case Match_InvalidMemoryIndexedSImm8:
5161   case Match_InvalidMemoryIndexedSImm9:
5162   case Match_InvalidMemoryIndexed16SImm9:
5163   case Match_InvalidMemoryIndexed8SImm10:
5164   case Match_InvalidImm0_1:
5165   case Match_InvalidImm0_7:
5166   case Match_InvalidImm0_15:
5167   case Match_InvalidImm0_31:
5168   case Match_InvalidImm0_63:
5169   case Match_InvalidImm0_127:
5170   case Match_InvalidImm0_255:
5171   case Match_InvalidImm0_65535:
5172   case Match_InvalidImm1_8:
5173   case Match_InvalidImm1_16:
5174   case Match_InvalidImm1_32:
5175   case Match_InvalidImm1_64:
5176   case Match_InvalidSVEAddSubImm8:
5177   case Match_InvalidSVEAddSubImm16:
5178   case Match_InvalidSVEAddSubImm32:
5179   case Match_InvalidSVEAddSubImm64:
5180   case Match_InvalidSVECpyImm8:
5181   case Match_InvalidSVECpyImm16:
5182   case Match_InvalidSVECpyImm32:
5183   case Match_InvalidSVECpyImm64:
5184   case Match_InvalidIndexRange1_1:
5185   case Match_InvalidIndexRange0_15:
5186   case Match_InvalidIndexRange0_7:
5187   case Match_InvalidIndexRange0_3:
5188   case Match_InvalidIndexRange0_1:
5189   case Match_InvalidSVEIndexRange0_63:
5190   case Match_InvalidSVEIndexRange0_31:
5191   case Match_InvalidSVEIndexRange0_15:
5192   case Match_InvalidSVEIndexRange0_7:
5193   case Match_InvalidSVEIndexRange0_3:
5194   case Match_InvalidLabel:
5195   case Match_InvalidComplexRotationEven:
5196   case Match_InvalidComplexRotationOdd:
5197   case Match_InvalidGPR64shifted8:
5198   case Match_InvalidGPR64shifted16:
5199   case Match_InvalidGPR64shifted32:
5200   case Match_InvalidGPR64shifted64:
5201   case Match_InvalidGPR64NoXZRshifted8:
5202   case Match_InvalidGPR64NoXZRshifted16:
5203   case Match_InvalidGPR64NoXZRshifted32:
5204   case Match_InvalidGPR64NoXZRshifted64:
5205   case Match_InvalidZPR32UXTW8:
5206   case Match_InvalidZPR32UXTW16:
5207   case Match_InvalidZPR32UXTW32:
5208   case Match_InvalidZPR32UXTW64:
5209   case Match_InvalidZPR32SXTW8:
5210   case Match_InvalidZPR32SXTW16:
5211   case Match_InvalidZPR32SXTW32:
5212   case Match_InvalidZPR32SXTW64:
5213   case Match_InvalidZPR64UXTW8:
5214   case Match_InvalidZPR64SXTW8:
5215   case Match_InvalidZPR64UXTW16:
5216   case Match_InvalidZPR64SXTW16:
5217   case Match_InvalidZPR64UXTW32:
5218   case Match_InvalidZPR64SXTW32:
5219   case Match_InvalidZPR64UXTW64:
5220   case Match_InvalidZPR64SXTW64:
5221   case Match_InvalidZPR32LSL8:
5222   case Match_InvalidZPR32LSL16:
5223   case Match_InvalidZPR32LSL32:
5224   case Match_InvalidZPR32LSL64:
5225   case Match_InvalidZPR64LSL8:
5226   case Match_InvalidZPR64LSL16:
5227   case Match_InvalidZPR64LSL32:
5228   case Match_InvalidZPR64LSL64:
5229   case Match_InvalidZPR0:
5230   case Match_InvalidZPR8:
5231   case Match_InvalidZPR16:
5232   case Match_InvalidZPR32:
5233   case Match_InvalidZPR64:
5234   case Match_InvalidZPR128:
5235   case Match_InvalidZPR_3b8:
5236   case Match_InvalidZPR_3b16:
5237   case Match_InvalidZPR_3b32:
5238   case Match_InvalidZPR_4b16:
5239   case Match_InvalidZPR_4b32:
5240   case Match_InvalidZPR_4b64:
5241   case Match_InvalidSVEPredicateAnyReg:
5242   case Match_InvalidSVEPattern:
5243   case Match_InvalidSVEPredicateBReg:
5244   case Match_InvalidSVEPredicateHReg:
5245   case Match_InvalidSVEPredicateSReg:
5246   case Match_InvalidSVEPredicateDReg:
5247   case Match_InvalidSVEPredicate3bAnyReg:
5248   case Match_InvalidSVEPredicate3bBReg:
5249   case Match_InvalidSVEPredicate3bHReg:
5250   case Match_InvalidSVEPredicate3bSReg:
5251   case Match_InvalidSVEPredicate3bDReg:
5252   case Match_InvalidSVEExactFPImmOperandHalfOne:
5253   case Match_InvalidSVEExactFPImmOperandHalfTwo:
5254   case Match_InvalidSVEExactFPImmOperandZeroOne:
5255   case Match_MSR:
5256   case Match_MRS: {
5257     if (ErrorInfo >= Operands.size())
5258       return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5259     // Any time we get here, there's nothing fancy to do. Just get the
5260     // operand SMLoc and display the diagnostic.
5261     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5262     if (ErrorLoc == SMLoc())
5263       ErrorLoc = IDLoc;
5264     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5265   }
5266   }
5267 
5268   llvm_unreachable("Implement any new match types added!");
5269 }
5270 
5271 /// ParseDirective parses the arm specific directives
5272 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5273   const MCObjectFileInfo::Environment Format =
5274     getContext().getObjectFileInfo()->getObjectFileType();
5275   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5276   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
5277 
5278   auto IDVal = DirectiveID.getIdentifier().lower();
5279   SMLoc Loc = DirectiveID.getLoc();
5280   if (IDVal == ".arch")
5281     parseDirectiveArch(Loc);
5282   else if (IDVal == ".cpu")
5283     parseDirectiveCPU(Loc);
5284   else if (IDVal == ".tlsdesccall")
5285     parseDirectiveTLSDescCall(Loc);
5286   else if (IDVal == ".ltorg" || IDVal == ".pool")
5287     parseDirectiveLtorg(Loc);
5288   else if (IDVal == ".unreq")
5289     parseDirectiveUnreq(Loc);
5290   else if (IDVal == ".inst")
5291     parseDirectiveInst(Loc);
5292   else if (IDVal == ".cfi_negate_ra_state")
5293     parseDirectiveCFINegateRAState();
5294   else if (IDVal == ".cfi_b_key_frame")
5295     parseDirectiveCFIBKeyFrame();
5296   else if (IDVal == ".arch_extension")
5297     parseDirectiveArchExtension(Loc);
5298   else if (IDVal == ".variant_pcs")
5299     parseDirectiveVariantPCS(Loc);
5300   else if (IsMachO) {
5301     if (IDVal == MCLOHDirectiveName())
5302       parseDirectiveLOH(IDVal, Loc);
5303     else
5304       return true;
5305   } else if (IsCOFF) {
5306     if (IDVal == ".seh_stackalloc")
5307       parseDirectiveSEHAllocStack(Loc);
5308     else if (IDVal == ".seh_endprologue")
5309       parseDirectiveSEHPrologEnd(Loc);
5310     else if (IDVal == ".seh_save_r19r20_x")
5311       parseDirectiveSEHSaveR19R20X(Loc);
5312     else if (IDVal == ".seh_save_fplr")
5313       parseDirectiveSEHSaveFPLR(Loc);
5314     else if (IDVal == ".seh_save_fplr_x")
5315       parseDirectiveSEHSaveFPLRX(Loc);
5316     else if (IDVal == ".seh_save_reg")
5317       parseDirectiveSEHSaveReg(Loc);
5318     else if (IDVal == ".seh_save_reg_x")
5319       parseDirectiveSEHSaveRegX(Loc);
5320     else if (IDVal == ".seh_save_regp")
5321       parseDirectiveSEHSaveRegP(Loc);
5322     else if (IDVal == ".seh_save_regp_x")
5323       parseDirectiveSEHSaveRegPX(Loc);
5324     else if (IDVal == ".seh_save_lrpair")
5325       parseDirectiveSEHSaveLRPair(Loc);
5326     else if (IDVal == ".seh_save_freg")
5327       parseDirectiveSEHSaveFReg(Loc);
5328     else if (IDVal == ".seh_save_freg_x")
5329       parseDirectiveSEHSaveFRegX(Loc);
5330     else if (IDVal == ".seh_save_fregp")
5331       parseDirectiveSEHSaveFRegP(Loc);
5332     else if (IDVal == ".seh_save_fregp_x")
5333       parseDirectiveSEHSaveFRegPX(Loc);
5334     else if (IDVal == ".seh_set_fp")
5335       parseDirectiveSEHSetFP(Loc);
5336     else if (IDVal == ".seh_add_fp")
5337       parseDirectiveSEHAddFP(Loc);
5338     else if (IDVal == ".seh_nop")
5339       parseDirectiveSEHNop(Loc);
5340     else if (IDVal == ".seh_save_next")
5341       parseDirectiveSEHSaveNext(Loc);
5342     else if (IDVal == ".seh_startepilogue")
5343       parseDirectiveSEHEpilogStart(Loc);
5344     else if (IDVal == ".seh_endepilogue")
5345       parseDirectiveSEHEpilogEnd(Loc);
5346     else if (IDVal == ".seh_trap_frame")
5347       parseDirectiveSEHTrapFrame(Loc);
5348     else if (IDVal == ".seh_pushframe")
5349       parseDirectiveSEHMachineFrame(Loc);
5350     else if (IDVal == ".seh_context")
5351       parseDirectiveSEHContext(Loc);
5352     else if (IDVal == ".seh_clear_unwound_to_call")
5353       parseDirectiveSEHClearUnwoundToCall(Loc);
5354     else
5355       return true;
5356   } else
5357     return true;
5358   return false;
5359 }
5360 
5361 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5362                             SmallVector<StringRef, 4> &RequestedExtensions) {
5363   const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
5364   const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
5365 
5366   if (!NoCrypto && Crypto) {
5367     switch (ArchKind) {
5368     default:
5369       // Map 'generic' (and others) to sha2 and aes, because
5370       // that was the traditional meaning of crypto.
5371     case AArch64::ArchKind::ARMV8_1A:
5372     case AArch64::ArchKind::ARMV8_2A:
5373     case AArch64::ArchKind::ARMV8_3A:
5374       RequestedExtensions.push_back("sha2");
5375       RequestedExtensions.push_back("aes");
5376       break;
5377     case AArch64::ArchKind::ARMV8_4A:
5378     case AArch64::ArchKind::ARMV8_5A:
5379     case AArch64::ArchKind::ARMV8_6A:
5380     case AArch64::ArchKind::ARMV8_7A:
5381     case AArch64::ArchKind::ARMV8R:
5382       RequestedExtensions.push_back("sm4");
5383       RequestedExtensions.push_back("sha3");
5384       RequestedExtensions.push_back("sha2");
5385       RequestedExtensions.push_back("aes");
5386       break;
5387     }
5388   } else if (NoCrypto) {
5389     switch (ArchKind) {
5390     default:
5391       // Map 'generic' (and others) to sha2 and aes, because
5392       // that was the traditional meaning of crypto.
5393     case AArch64::ArchKind::ARMV8_1A:
5394     case AArch64::ArchKind::ARMV8_2A:
5395     case AArch64::ArchKind::ARMV8_3A:
5396       RequestedExtensions.push_back("nosha2");
5397       RequestedExtensions.push_back("noaes");
5398       break;
5399     case AArch64::ArchKind::ARMV8_4A:
5400     case AArch64::ArchKind::ARMV8_5A:
5401     case AArch64::ArchKind::ARMV8_6A:
5402     case AArch64::ArchKind::ARMV8_7A:
5403       RequestedExtensions.push_back("nosm4");
5404       RequestedExtensions.push_back("nosha3");
5405       RequestedExtensions.push_back("nosha2");
5406       RequestedExtensions.push_back("noaes");
5407       break;
5408     }
5409   }
5410 }
5411 
5412 /// parseDirectiveArch
5413 ///   ::= .arch token
5414 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5415   SMLoc ArchLoc = getLoc();
5416 
5417   StringRef Arch, ExtensionString;
5418   std::tie(Arch, ExtensionString) =
5419       getParser().parseStringToEndOfStatement().trim().split('+');
5420 
5421   AArch64::ArchKind ID = AArch64::parseArch(Arch);
5422   if (ID == AArch64::ArchKind::INVALID)
5423     return Error(ArchLoc, "unknown arch name");
5424 
5425   if (parseToken(AsmToken::EndOfStatement))
5426     return true;
5427 
5428   // Get the architecture and extension features.
5429   std::vector<StringRef> AArch64Features;
5430   AArch64::getArchFeatures(ID, AArch64Features);
5431   AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5432                                 AArch64Features);
5433 
5434   MCSubtargetInfo &STI = copySTI();
5435   std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5436   STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
5437                          join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5438 
5439   SmallVector<StringRef, 4> RequestedExtensions;
5440   if (!ExtensionString.empty())
5441     ExtensionString.split(RequestedExtensions, '+');
5442 
5443   ExpandCryptoAEK(ID, RequestedExtensions);
5444 
5445   FeatureBitset Features = STI.getFeatureBits();
5446   for (auto Name : RequestedExtensions) {
5447     bool EnableFeature = true;
5448 
5449     if (Name.startswith_lower("no")) {
5450       EnableFeature = false;
5451       Name = Name.substr(2);
5452     }
5453 
5454     for (const auto &Extension : ExtensionMap) {
5455       if (Extension.Name != Name)
5456         continue;
5457 
5458       if (Extension.Features.none())
5459         report_fatal_error("unsupported architectural extension: " + Name);
5460 
5461       FeatureBitset ToggleFeatures = EnableFeature
5462                                          ? (~Features & Extension.Features)
5463                                          : ( Features & Extension.Features);
5464       FeatureBitset Features =
5465           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5466       setAvailableFeatures(Features);
5467       break;
5468     }
5469   }
5470   return false;
5471 }
5472 
5473 /// parseDirectiveArchExtension
5474 ///   ::= .arch_extension [no]feature
5475 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5476   SMLoc ExtLoc = getLoc();
5477 
5478   StringRef Name = getParser().parseStringToEndOfStatement().trim();
5479 
5480   if (parseToken(AsmToken::EndOfStatement,
5481                  "unexpected token in '.arch_extension' directive"))
5482     return true;
5483 
5484   bool EnableFeature = true;
5485   if (Name.startswith_lower("no")) {
5486     EnableFeature = false;
5487     Name = Name.substr(2);
5488   }
5489 
5490   MCSubtargetInfo &STI = copySTI();
5491   FeatureBitset Features = STI.getFeatureBits();
5492   for (const auto &Extension : ExtensionMap) {
5493     if (Extension.Name != Name)
5494       continue;
5495 
5496     if (Extension.Features.none())
5497       return Error(ExtLoc, "unsupported architectural extension: " + Name);
5498 
5499     FeatureBitset ToggleFeatures = EnableFeature
5500                                        ? (~Features & Extension.Features)
5501                                        : (Features & Extension.Features);
5502     FeatureBitset Features =
5503         ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5504     setAvailableFeatures(Features);
5505     return false;
5506   }
5507 
5508   return Error(ExtLoc, "unknown architectural extension: " + Name);
5509 }
5510 
5511 static SMLoc incrementLoc(SMLoc L, int Offset) {
5512   return SMLoc::getFromPointer(L.getPointer() + Offset);
5513 }
5514 
5515 /// parseDirectiveCPU
5516 ///   ::= .cpu id
5517 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5518   SMLoc CurLoc = getLoc();
5519 
5520   StringRef CPU, ExtensionString;
5521   std::tie(CPU, ExtensionString) =
5522       getParser().parseStringToEndOfStatement().trim().split('+');
5523 
5524   if (parseToken(AsmToken::EndOfStatement))
5525     return true;
5526 
5527   SmallVector<StringRef, 4> RequestedExtensions;
5528   if (!ExtensionString.empty())
5529     ExtensionString.split(RequestedExtensions, '+');
5530 
5531   // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5532   // once that is tablegen'ed
5533   if (!getSTI().isCPUStringValid(CPU)) {
5534     Error(CurLoc, "unknown CPU name");
5535     return false;
5536   }
5537 
5538   MCSubtargetInfo &STI = copySTI();
5539   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
5540   CurLoc = incrementLoc(CurLoc, CPU.size());
5541 
5542   ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5543 
5544   FeatureBitset Features = STI.getFeatureBits();
5545   for (auto Name : RequestedExtensions) {
5546     // Advance source location past '+'.
5547     CurLoc = incrementLoc(CurLoc, 1);
5548 
5549     bool EnableFeature = true;
5550 
5551     if (Name.startswith_lower("no")) {
5552       EnableFeature = false;
5553       Name = Name.substr(2);
5554     }
5555 
5556     bool FoundExtension = false;
5557     for (const auto &Extension : ExtensionMap) {
5558       if (Extension.Name != Name)
5559         continue;
5560 
5561       if (Extension.Features.none())
5562         report_fatal_error("unsupported architectural extension: " + Name);
5563 
5564       FeatureBitset ToggleFeatures = EnableFeature
5565                                          ? (~Features & Extension.Features)
5566                                          : ( Features & Extension.Features);
5567       FeatureBitset Features =
5568           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5569       setAvailableFeatures(Features);
5570       FoundExtension = true;
5571 
5572       break;
5573     }
5574 
5575     if (!FoundExtension)
5576       Error(CurLoc, "unsupported architectural extension");
5577 
5578     CurLoc = incrementLoc(CurLoc, Name.size());
5579   }
5580   return false;
5581 }
5582 
5583 /// parseDirectiveInst
5584 ///  ::= .inst opcode [, ...]
5585 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5586   if (getLexer().is(AsmToken::EndOfStatement))
5587     return Error(Loc, "expected expression following '.inst' directive");
5588 
5589   auto parseOp = [&]() -> bool {
5590     SMLoc L = getLoc();
5591     const MCExpr *Expr = nullptr;
5592     if (check(getParser().parseExpression(Expr), L, "expected expression"))
5593       return true;
5594     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5595     if (check(!Value, L, "expected constant expression"))
5596       return true;
5597     getTargetStreamer().emitInst(Value->getValue());
5598     return false;
5599   };
5600 
5601   if (parseMany(parseOp))
5602     return addErrorSuffix(" in '.inst' directive");
5603   return false;
5604 }
5605 
5606 // parseDirectiveTLSDescCall:
5607 //   ::= .tlsdesccall symbol
5608 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5609   StringRef Name;
5610   if (check(getParser().parseIdentifier(Name), L,
5611             "expected symbol after directive") ||
5612       parseToken(AsmToken::EndOfStatement))
5613     return true;
5614 
5615   MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5616   const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5617   Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5618 
5619   MCInst Inst;
5620   Inst.setOpcode(AArch64::TLSDESCCALL);
5621   Inst.addOperand(MCOperand::createExpr(Expr));
5622 
5623   getParser().getStreamer().emitInstruction(Inst, getSTI());
5624   return false;
5625 }
5626 
5627 /// ::= .loh <lohName | lohId> label1, ..., labelN
5628 /// The number of arguments depends on the loh identifier.
5629 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5630   MCLOHType Kind;
5631   if (getParser().getTok().isNot(AsmToken::Identifier)) {
5632     if (getParser().getTok().isNot(AsmToken::Integer))
5633       return TokError("expected an identifier or a number in directive");
5634     // We successfully get a numeric value for the identifier.
5635     // Check if it is valid.
5636     int64_t Id = getParser().getTok().getIntVal();
5637     if (Id <= -1U && !isValidMCLOHType(Id))
5638       return TokError("invalid numeric identifier in directive");
5639     Kind = (MCLOHType)Id;
5640   } else {
5641     StringRef Name = getTok().getIdentifier();
5642     // We successfully parse an identifier.
5643     // Check if it is a recognized one.
5644     int Id = MCLOHNameToId(Name);
5645 
5646     if (Id == -1)
5647       return TokError("invalid identifier in directive");
5648     Kind = (MCLOHType)Id;
5649   }
5650   // Consume the identifier.
5651   Lex();
5652   // Get the number of arguments of this LOH.
5653   int NbArgs = MCLOHIdToNbArgs(Kind);
5654 
5655   assert(NbArgs != -1 && "Invalid number of arguments");
5656 
5657   SmallVector<MCSymbol *, 3> Args;
5658   for (int Idx = 0; Idx < NbArgs; ++Idx) {
5659     StringRef Name;
5660     if (getParser().parseIdentifier(Name))
5661       return TokError("expected identifier in directive");
5662     Args.push_back(getContext().getOrCreateSymbol(Name));
5663 
5664     if (Idx + 1 == NbArgs)
5665       break;
5666     if (parseToken(AsmToken::Comma,
5667                    "unexpected token in '" + Twine(IDVal) + "' directive"))
5668       return true;
5669   }
5670   if (parseToken(AsmToken::EndOfStatement,
5671                  "unexpected token in '" + Twine(IDVal) + "' directive"))
5672     return true;
5673 
5674   getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
5675   return false;
5676 }
5677 
5678 /// parseDirectiveLtorg
5679 ///  ::= .ltorg | .pool
5680 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5681   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5682     return true;
5683   getTargetStreamer().emitCurrentConstantPool();
5684   return false;
5685 }
5686 
5687 /// parseDirectiveReq
5688 ///  ::= name .req registername
5689 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5690   MCAsmParser &Parser = getParser();
5691   Parser.Lex(); // Eat the '.req' token.
5692   SMLoc SRegLoc = getLoc();
5693   RegKind RegisterKind = RegKind::Scalar;
5694   unsigned RegNum;
5695   OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5696 
5697   if (ParseRes != MatchOperand_Success) {
5698     StringRef Kind;
5699     RegisterKind = RegKind::NeonVector;
5700     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5701 
5702     if (ParseRes == MatchOperand_ParseFail)
5703       return true;
5704 
5705     if (ParseRes == MatchOperand_Success && !Kind.empty())
5706       return Error(SRegLoc, "vector register without type specifier expected");
5707   }
5708 
5709   if (ParseRes != MatchOperand_Success) {
5710     StringRef Kind;
5711     RegisterKind = RegKind::SVEDataVector;
5712     ParseRes =
5713         tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5714 
5715     if (ParseRes == MatchOperand_ParseFail)
5716       return true;
5717 
5718     if (ParseRes == MatchOperand_Success && !Kind.empty())
5719       return Error(SRegLoc,
5720                    "sve vector register without type specifier expected");
5721   }
5722 
5723   if (ParseRes != MatchOperand_Success) {
5724     StringRef Kind;
5725     RegisterKind = RegKind::SVEPredicateVector;
5726     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5727 
5728     if (ParseRes == MatchOperand_ParseFail)
5729       return true;
5730 
5731     if (ParseRes == MatchOperand_Success && !Kind.empty())
5732       return Error(SRegLoc,
5733                    "sve predicate register without type specifier expected");
5734   }
5735 
5736   if (ParseRes != MatchOperand_Success)
5737     return Error(SRegLoc, "register name or alias expected");
5738 
5739   // Shouldn't be anything else.
5740   if (parseToken(AsmToken::EndOfStatement,
5741                  "unexpected input in .req directive"))
5742     return true;
5743 
5744   auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5745   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5746     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5747 
5748   return false;
5749 }
5750 
5751 /// parseDirectiveUneq
5752 ///  ::= .unreq registername
5753 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5754   MCAsmParser &Parser = getParser();
5755   if (getTok().isNot(AsmToken::Identifier))
5756     return TokError("unexpected input in .unreq directive.");
5757   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5758   Parser.Lex(); // Eat the identifier.
5759   if (parseToken(AsmToken::EndOfStatement))
5760     return addErrorSuffix("in '.unreq' directive");
5761   return false;
5762 }
5763 
5764 bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5765   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5766     return true;
5767   getStreamer().emitCFINegateRAState();
5768   return false;
5769 }
5770 
5771 /// parseDirectiveCFIBKeyFrame
5772 /// ::= .cfi_b_key
5773 bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5774   if (parseToken(AsmToken::EndOfStatement,
5775                  "unexpected token in '.cfi_b_key_frame'"))
5776     return true;
5777   getStreamer().emitCFIBKeyFrame();
5778   return false;
5779 }
5780 
5781 /// parseDirectiveVariantPCS
5782 /// ::= .variant_pcs symbolname
5783 bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
5784   MCAsmParser &Parser = getParser();
5785 
5786   const AsmToken &Tok = Parser.getTok();
5787   if (Tok.isNot(AsmToken::Identifier))
5788     return TokError("expected symbol name");
5789 
5790   StringRef SymbolName = Tok.getIdentifier();
5791 
5792   MCSymbol *Sym = getContext().lookupSymbol(SymbolName);
5793   if (!Sym)
5794     return TokError("unknown symbol in '.variant_pcs' directive");
5795 
5796   Parser.Lex(); // Eat the symbol
5797 
5798   // Shouldn't be any more tokens
5799   if (parseToken(AsmToken::EndOfStatement))
5800     return addErrorSuffix(" in '.variant_pcs' directive");
5801 
5802   getTargetStreamer().emitDirectiveVariantPCS(Sym);
5803 
5804   return false;
5805 }
5806 
5807 /// parseDirectiveSEHAllocStack
5808 /// ::= .seh_stackalloc
5809 bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
5810   int64_t Size;
5811   if (parseImmExpr(Size))
5812     return true;
5813   getTargetStreamer().EmitARM64WinCFIAllocStack(Size);
5814   return false;
5815 }
5816 
5817 /// parseDirectiveSEHPrologEnd
5818 /// ::= .seh_endprologue
5819 bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
5820   getTargetStreamer().EmitARM64WinCFIPrologEnd();
5821   return false;
5822 }
5823 
5824 /// parseDirectiveSEHSaveR19R20X
5825 /// ::= .seh_save_r19r20_x
5826 bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
5827   int64_t Offset;
5828   if (parseImmExpr(Offset))
5829     return true;
5830   getTargetStreamer().EmitARM64WinCFISaveR19R20X(Offset);
5831   return false;
5832 }
5833 
5834 /// parseDirectiveSEHSaveFPLR
5835 /// ::= .seh_save_fplr
5836 bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
5837   int64_t Offset;
5838   if (parseImmExpr(Offset))
5839     return true;
5840   getTargetStreamer().EmitARM64WinCFISaveFPLR(Offset);
5841   return false;
5842 }
5843 
5844 /// parseDirectiveSEHSaveFPLRX
5845 /// ::= .seh_save_fplr_x
5846 bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
5847   int64_t Offset;
5848   if (parseImmExpr(Offset))
5849     return true;
5850   getTargetStreamer().EmitARM64WinCFISaveFPLRX(Offset);
5851   return false;
5852 }
5853 
5854 /// parseDirectiveSEHSaveReg
5855 /// ::= .seh_save_reg
5856 bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
5857   unsigned Reg;
5858   int64_t Offset;
5859   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5860       parseComma() || parseImmExpr(Offset))
5861     return true;
5862   getTargetStreamer().EmitARM64WinCFISaveReg(Reg, Offset);
5863   return false;
5864 }
5865 
5866 /// parseDirectiveSEHSaveRegX
5867 /// ::= .seh_save_reg_x
5868 bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
5869   unsigned Reg;
5870   int64_t Offset;
5871   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5872       parseComma() || parseImmExpr(Offset))
5873     return true;
5874   getTargetStreamer().EmitARM64WinCFISaveRegX(Reg, Offset);
5875   return false;
5876 }
5877 
5878 /// parseDirectiveSEHSaveRegP
5879 /// ::= .seh_save_regp
5880 bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
5881   unsigned Reg;
5882   int64_t Offset;
5883   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
5884       parseComma() || parseImmExpr(Offset))
5885     return true;
5886   getTargetStreamer().EmitARM64WinCFISaveRegP(Reg, Offset);
5887   return false;
5888 }
5889 
5890 /// parseDirectiveSEHSaveRegPX
5891 /// ::= .seh_save_regp_x
5892 bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
5893   unsigned Reg;
5894   int64_t Offset;
5895   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
5896       parseComma() || parseImmExpr(Offset))
5897     return true;
5898   getTargetStreamer().EmitARM64WinCFISaveRegPX(Reg, Offset);
5899   return false;
5900 }
5901 
5902 /// parseDirectiveSEHSaveLRPair
5903 /// ::= .seh_save_lrpair
5904 bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
5905   unsigned Reg;
5906   int64_t Offset;
5907   L = getLoc();
5908   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
5909       parseComma() || parseImmExpr(Offset))
5910     return true;
5911   if (check(((Reg - 19) % 2 != 0), L,
5912             "expected register with even offset from x19"))
5913     return true;
5914   getTargetStreamer().EmitARM64WinCFISaveLRPair(Reg, Offset);
5915   return false;
5916 }
5917 
5918 /// parseDirectiveSEHSaveFReg
5919 /// ::= .seh_save_freg
5920 bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
5921   unsigned Reg;
5922   int64_t Offset;
5923   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
5924       parseComma() || parseImmExpr(Offset))
5925     return true;
5926   getTargetStreamer().EmitARM64WinCFISaveFReg(Reg, Offset);
5927   return false;
5928 }
5929 
5930 /// parseDirectiveSEHSaveFRegX
5931 /// ::= .seh_save_freg_x
5932 bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
5933   unsigned Reg;
5934   int64_t Offset;
5935   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
5936       parseComma() || parseImmExpr(Offset))
5937     return true;
5938   getTargetStreamer().EmitARM64WinCFISaveFRegX(Reg, Offset);
5939   return false;
5940 }
5941 
5942 /// parseDirectiveSEHSaveFRegP
5943 /// ::= .seh_save_fregp
5944 bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
5945   unsigned Reg;
5946   int64_t Offset;
5947   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
5948       parseComma() || parseImmExpr(Offset))
5949     return true;
5950   getTargetStreamer().EmitARM64WinCFISaveFRegP(Reg, Offset);
5951   return false;
5952 }
5953 
5954 /// parseDirectiveSEHSaveFRegPX
5955 /// ::= .seh_save_fregp_x
5956 bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
5957   unsigned Reg;
5958   int64_t Offset;
5959   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
5960       parseComma() || parseImmExpr(Offset))
5961     return true;
5962   getTargetStreamer().EmitARM64WinCFISaveFRegPX(Reg, Offset);
5963   return false;
5964 }
5965 
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFISetFP();
  return false;
}

/// parseDirectiveSEHAddFP
/// ::= .seh_add_fp
/// Parses one immediate operand and forwards it to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
  int64_t Size;
  if (parseImmExpr(Size))
    return true;
  getTargetStreamer().EmitARM64WinCFIAddFP(Size);
  return false;
}

/// parseDirectiveSEHNop
/// ::= .seh_nop
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFINop();
  return false;
}

/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFISaveNext();
  return false;
}

/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFIEpilogStart();
  return false;
}

/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFIEpilogEnd();
  return false;
}

/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFITrapFrame();
  return false;
}

/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFIMachineFrame();
  return false;
}

/// parseDirectiveSEHContext
/// ::= .seh_context
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFIContext();
  return false;
}

/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
/// Takes no operands; forwards directly to the target streamer.
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  getTargetStreamer().EmitARM64WinCFIClearUnwoundToCall();
  return false;
}
6038 
/// Decompose an operand expression into the pieces a relocatable AArch64
/// operand needs: an optional target-specific modifier kind (\p ELFRefKind),
/// a Darwin-style symbol variant (\p DarwinRefKind), and a constant
/// \p Addend. Returns true when the expression has the shape of a symbol
/// reference (possibly plus a constant addend), false otherwise.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  // Start from neutral defaults; they are filled in as the expression is
  // peeled apart below.
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Strip an outer target-specific expression wrapper and record its kind.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
6080 
6081 /// Force static initialization.
6082 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
6083   RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
6084   RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
6085   RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
6086   RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
6087   RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
6088 }
6089 
6090 #define GET_REGISTER_MATCHER
6091 #define GET_SUBTARGET_FEATURE_NAME
6092 #define GET_MATCHER_IMPLEMENTATION
6093 #define GET_MNEMONIC_SPELL_CHECKER
6094 #include "AArch64GenAsmMatcher.inc"
6095 
6096 // Define this matcher function after the auto-generated include so we
6097 // have the match class enum definitions.
6098 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
6099                                                       unsigned Kind) {
6100   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
6101   // If the kind is a token for a literal immediate, check if our asm
6102   // operand matches. This is for InstAliases which have a fixed-value
6103   // immediate in the syntax.
6104   int64_t ExpectedVal;
6105   switch (Kind) {
6106   default:
6107     return Match_InvalidOperand;
6108   case MCK__HASH_0:
6109     ExpectedVal = 0;
6110     break;
6111   case MCK__HASH_1:
6112     ExpectedVal = 1;
6113     break;
6114   case MCK__HASH_12:
6115     ExpectedVal = 12;
6116     break;
6117   case MCK__HASH_16:
6118     ExpectedVal = 16;
6119     break;
6120   case MCK__HASH_2:
6121     ExpectedVal = 2;
6122     break;
6123   case MCK__HASH_24:
6124     ExpectedVal = 24;
6125     break;
6126   case MCK__HASH_3:
6127     ExpectedVal = 3;
6128     break;
6129   case MCK__HASH_32:
6130     ExpectedVal = 32;
6131     break;
6132   case MCK__HASH_4:
6133     ExpectedVal = 4;
6134     break;
6135   case MCK__HASH_48:
6136     ExpectedVal = 48;
6137     break;
6138   case MCK__HASH_6:
6139     ExpectedVal = 6;
6140     break;
6141   case MCK__HASH_64:
6142     ExpectedVal = 64;
6143     break;
6144   case MCK__HASH_8:
6145     ExpectedVal = 8;
6146     break;
6147   }
6148   if (!Op.isImm())
6149     return Match_InvalidOperand;
6150   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6151   if (!CE)
6152     return Match_InvalidOperand;
6153   if (CE->getValue() == ExpectedVal)
6154     return Match_Success;
6155   return Match_InvalidOperand;
6156 }
6157 
/// Parse a consecutive even/odd pair of W or X registers (e.g. "x0, x1")
/// and push a single sequential register-pair operand. The first register
/// must have an even encoding and the second must be the next consecutive
/// register of the same size.
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register fixes the size (W or X) for the whole pair.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The pair must start on an even encoding.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  // The second register must be the consecutive odd register of the same
  // size class.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Map the lead register up to the super-register that represents the pair.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
6230 
/// Parse an SVE data vector register operand, e.g. "z0" or "z0.s".
/// When \p ParseSuffix is set, an element-type suffix is required; when
/// \p ParseShiftExtend is set, an optional ", <shift/extend>" may follow.
template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  unsigned RegNum;
  StringRef Kind;

  OperandMatchResultTy Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

  if (Res != MatchOperand_Success)
    return Res;

  if (ParseSuffix && Kind.empty())
    return MatchOperand_NoMatch;

  // Translate the suffix (possibly empty) into an element width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    // NOTE(review): this inner 'Res' intentionally shadows the outer one;
    // it only applies to the optional vector-index suffix.
    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
    if (Res == MatchOperand_ParseFail)
      return MatchOperand_ParseFail;
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into the vector register operand.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
6282 
/// Parse an SVE predicate pattern operand: either an immediate encoding
/// ("#31") or a named pattern looked up in the SVEPREDPAT table.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = Parser.getTok();
  bool IsHash = TokE.is(AsmToken::Hash);

  if (!IsHash && TokE.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Parser.Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // The immediate must fold to a constant.
    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return MatchOperand_ParseFail;

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
    if (!Pat)
      return MatchOperand_NoMatch;

    Parser.Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  // Wrap the encoding in a constant-immediate operand.
  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));

  return MatchOperand_Success;
}
6326 
6327 OperandMatchResultTy
6328 AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
6329   SMLoc SS = getLoc();
6330 
6331   unsigned XReg;
6332   if (tryParseScalarRegister(XReg) != MatchOperand_Success)
6333     return MatchOperand_NoMatch;
6334 
6335   MCContext &ctx = getContext();
6336   const MCRegisterInfo *RI = ctx.getRegisterInfo();
6337   int X8Reg = RI->getMatchingSuperReg(
6338       XReg, AArch64::x8sub_0,
6339       &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
6340   if (!X8Reg) {
6341     Error(SS, "expected an even-numbered x-register in the range [x0,x22]");
6342     return MatchOperand_ParseFail;
6343   }
6344 
6345   Operands.push_back(
6346       AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
6347   return MatchOperand_Success;
6348 }
6349