xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp (revision ece7a5e9849032f4a31e725714c2db89d055b706)
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/AArch64AddressingModes.h"
10 #include "MCTargetDesc/AArch64MCExpr.h"
11 #include "MCTargetDesc/AArch64MCTargetDesc.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "TargetInfo/AArch64TargetInfo.h"
14 #include "AArch64InstrInfo.h"
15 #include "Utils/AArch64BaseInfo.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/StringMap.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Twine.h"
26 #include "llvm/MC/MCContext.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCInst.h"
29 #include "llvm/MC/MCLinkerOptimizationHint.h"
30 #include "llvm/MC/MCObjectFileInfo.h"
31 #include "llvm/MC/MCParser/MCAsmLexer.h"
32 #include "llvm/MC/MCParser/MCAsmParser.h"
33 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
34 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
36 #include "llvm/MC/MCRegisterInfo.h"
37 #include "llvm/MC/MCStreamer.h"
38 #include "llvm/MC/MCSubtargetInfo.h"
39 #include "llvm/MC/MCSymbol.h"
40 #include "llvm/MC/MCTargetOptions.h"
41 #include "llvm/MC/SubtargetFeature.h"
42 #include "llvm/MC/MCValue.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/Compiler.h"
45 #include "llvm/Support/ErrorHandling.h"
46 #include "llvm/Support/MathExtras.h"
47 #include "llvm/Support/SMLoc.h"
48 #include "llvm/Support/TargetParser.h"
49 #include "llvm/Support/TargetRegistry.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include <cassert>
52 #include <cctype>
53 #include <cstdint>
54 #include <cstdio>
55 #include <string>
56 #include <tuple>
57 #include <utility>
58 #include <vector>
59 
60 using namespace llvm;
61 
62 namespace {
63 
// Kinds of register names the parser distinguishes. Used as the value type in
// the .req alias map and to select matching rules for register operands.
enum class RegKind {
  Scalar,             // General-purpose or scalar FP register (w/x/b/h/s/d/q).
  NeonVector,         // NEON vector register (v0-v31).
  SVEDataVector,      // SVE data vector register (z0-z31).
  SVEPredicateVector  // SVE predicate register (p0-p15).
};
70 
// Describes how a parsed register must relate to the register class the
// instruction expects (e.g. GPR64as32 accepts the super/sub register).
enum RegConstraintEqualityTy {
  EqualsReg,      // Register must match exactly.
  EqualsSuperReg, // Parsed register is the super-register of the expected one.
  EqualsSubReg    // Parsed register is a sub-register of the expected one.
};
76 
/// AArch64AsmParser - Target-specific assembly parser for AArch64. Parses
/// instructions and target directives, producing MCInsts via the
/// tablegen-generated matcher included below.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  /// Records the state of a parsed MOVPRFX instruction (destination register,
  /// and predicate/element size when predicated) so the instruction that
  /// follows it can be checked against the prefix.
  class PrefixInfo {
  public:
    /// Decode \p Inst into a PrefixInfo. Only MOVPRFX opcodes produce an
    /// active prefix; any other opcode yields an inactive (default) one.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated form: only the destination matters.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Merging predicated form; governing predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing predicated form; governing predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    // Element size and predicate register are only meaningful for the
    // predicated MOVPRFX forms.
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix; // Prefix info carried from the previously parsed instruction.

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  /// Location of the current lexer token, for diagnostics.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  // Instruction/operand parsing helpers.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  // Target directive handlers (.arch, .cpu, .inst, .ltorg, .req, ...).
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // tryParse* helpers return an OperandMatchResultTy so the generated matcher
  // can distinguish "no match" from a hard parse error.
  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template<bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);

public:
  // Target-specific match result codes, appended after the generic ones.
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32; // True when targeting the ILP32 ABI (set from -mabi=ilp32).

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = Options.getABIName() == "ilp32";
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists even for null/asm streamers.
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  /// Classify \p Expr as an AArch64/ELF or Darwin symbol reference plus an
  /// optional constant addend. Returns false if the expression is not
  /// understood.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
272 
273 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
274 /// instruction.
275 class AArch64Operand : public MCParsedAsmOperand {
276 private:
  // Discriminator selecting which member of the operand union below is
  // currently active.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;
294 
  // Source range of the operand, for diagnostics.
  SMLoc StartLoc, EndLoc;

  // A raw token (mnemonic suffix, punctuation, etc.); non-owning view.
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount; // False when the default 'lsl #0' was assumed.
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
335 
  // A list of vector registers, e.g. "{ v0.8b, v1.8b }".
  struct VectorListOp {
    unsigned RegNum;       // First register in the list.
    unsigned Count;        // Number of consecutive registers.
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind  RegisterKind;
  };

  // A vector lane index, e.g. the "[1]" in "v0.s[1]".
  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  // An immediate with an explicit left-shift, e.g. "#1, lsl #12".
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
  };

  // A system register, with its name and MRS/MSR/pstate encodings.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // A system instruction CRn/CRm operand, e.g. "c12".
  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct ExtendOp {
    unsigned Val;
  };

  // Storage for the operand payload; the active member is chosen by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };
423 
  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  // Constructs an operand of kind \p K; the payload is filled in by the
  // static Create* factories.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
430 
  // Copy constructor: copies only the union member selected by Kind, since
  // reading an inactive union member would be undefined behavior.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
483 
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// The raw token text (valid only for k_Token operands).
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  /// True if this token is a suffix attached to the mnemonic.
  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  /// Reconstructs the FP immediate from its stored IEEE-double bit pattern.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  /// True if the parsed literal was representable exactly as a double.
  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }
528 
  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  /// First register of a vector list.
  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  /// Number of registers in a vector list.
  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors below accept either a standalone
  // k_ShiftExtend operand or a k_Register operand that carries an
  // attached shift/extend.
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
627 
  bool isImm() const override { return Kind == k_Immediate; }
  // AArch64 operands never use the generic memory-operand kind.
  bool isMem() const override { return false; }

  // Unsigned 6-bit immediate: a constant in [0, 63].
  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  // Signed Width-bit immediate (unscaled).
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }
650 
651   template <int Bits, int Scale>
652   DiagnosticPredicate isImmScaled(bool Signed) const {
653     if (!isImm())
654       return DiagnosticPredicateTy::NoMatch;
655 
656     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
657     if (!MCE)
658       return DiagnosticPredicateTy::NoMatch;
659 
660     int64_t MinVal, MaxVal;
661     if (Signed) {
662       int64_t Shift = Bits - 1;
663       MinVal = (int64_t(1) << Shift) * -Scale;
664       MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
665     } else {
666       MinVal = 0;
667       MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
668     }
669 
670     int64_t Val = MCE->getValue();
671     if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
672       return DiagnosticPredicateTy::Match;
673 
674     return DiagnosticPredicateTy::NearMatch;
675   }
676 
677   DiagnosticPredicate isSVEPattern() const {
678     if (!isImm())
679       return DiagnosticPredicateTy::NoMatch;
680     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
681     if (!MCE)
682       return DiagnosticPredicateTy::NoMatch;
683     int64_t Val = MCE->getValue();
684     if (Val >= 0 && Val < 32)
685       return DiagnosticPredicateTy::Match;
686     return DiagnosticPredicateTy::NearMatch;
687   }
688 
  /// Returns true if \p Expr is a symbolic expression acceptable as a
  /// 12-bit unsigned load/store offset (e.g. :lo12: or @pageoff references).
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
723 
  // A 12-bit unsigned offset scaled by \p Scale: either a symbolic
  // expression (deferred to the fixup code) or a non-negative multiple of
  // Scale whose scaled value fits in 12 bits.
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
735 
736   template <int N, int M>
737   bool isImmInRange() const {
738     if (!isImm())
739       return false;
740     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
741     if (!MCE)
742       return false;
743     int64_t Val = MCE->getValue();
744     return (Val >= N && Val <= M);
745   }
746 
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    // The value must fit in T whether interpreted as signed or unsigned;
    // reject anything whose truncation to T loses information both ways.
    int64_t Val = MCE->getValue();
    int64_t SVal = typename std::make_signed<T>::type(Val);
    int64_t UVal = typename std::make_unsigned<T>::type(Val);
    if (Val != SVal && Val != UVal)
      return false;

    return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
  }
765 
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already parsed with an explicit shift of exactly Width.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Plain immediate: fold it into (value >> Width, Width) when the low
    // Width bits are zero, otherwise report it unshifted.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
788 
  /// True if the operand is usable as an ADD/SUB immediate: either a
  /// recognized relocation modifier, or a constant in [0, 0xfff] optionally
  /// shifted left by 12.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      // Only these low-12-bit / high-12-bit reference kinds fit the field.
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
833 
834   bool isAddSubImmNeg() const {
835     if (!isShiftedImm() && !isImm())
836       return false;
837 
838     // Otherwise it should be a real negative immediate in range.
839     if (auto ShiftedVal = getShiftedVal<12>())
840       return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
841 
842     return false;
843   }
844 
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the 'lsl #8' shifted form.
    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
865 
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements cannot use the 'lsl #8' shifted form.
    bool IsByte =
        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
883 
884   template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
885     if (isLogicalImm<T>() && !isSVECpyImm<T>())
886       return DiagnosticPredicateTy::Match;
887     return DiagnosticPredicateTy::NoMatch;
888   }
889 
  bool isCondCode() const { return Kind == k_CondCode; }

  // AdvSIMD modified-immediate "type 10" (per AArch64_AM's classification):
  // checked on the constant value only.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
900 
  // Check that the operand is a valid branch target for an N-bit branch
  // offset field. Non-constant expressions (labels) are accepted as-is;
  // fixups resolve them later.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Targets are 4-byte aligned; the low two bits are not encoded.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    // Signed N-bit range, scaled by the 4-byte instruction size.
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
914 
915   bool
916   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
917     if (!isImm())
918       return false;
919 
920     AArch64MCExpr::VariantKind ELFRefKind;
921     MCSymbolRefExpr::VariantKind DarwinRefKind;
922     int64_t Addend;
923     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
924                                              DarwinRefKind, Addend)) {
925       return false;
926     }
927     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
928       return false;
929 
930     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
931       if (ELFRefKind == AllowedModifiers[i])
932         return true;
933     }
934 
935     return false;
936   }
937 
  // The following predicates accept the relocation modifiers that are legal
  // on each 16-bit slice (G0..G3) of a MOVZ/MOVK wide-immediate move.
  bool isMovZSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovZSymbolG2() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
                         AArch64MCExpr::VK_TPREL_G2,
                         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovZSymbolG1() const {
    return isMovWSymbol({
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    });
  }

  bool isMovZSymbolG0() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
                         AArch64MCExpr::VK_TPREL_G0,
                         AArch64MCExpr::VK_DTPREL_G0});
  }

  bool isMovKSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovKSymbolG2() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
  }

  // MOVK keeps the other bits, so only the non-checking (_NC) modifiers
  // are accepted for the lower slices.
  bool isMovKSymbolG1() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
                         AArch64MCExpr::VK_TPREL_G1_NC,
                         AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovKSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
981 
982   template<int RegWidth, int Shift>
983   bool isMOVZMovAlias() const {
984     if (!isImm()) return false;
985 
986     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
987     if (!CE) return false;
988     uint64_t Value = CE->getValue();
989 
990     return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
991   }
992 
993   template<int RegWidth, int Shift>
994   bool isMOVNMovAlias() const {
995     if (!isImm()) return false;
996 
997     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
998     if (!CE) return false;
999     uint64_t Value = CE->getValue();
1000 
1001     return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1002   }
1003 
  // True when the operand is an FP immediate encodable in the 8-bit
  // floating-point immediate field (getFP64Imm returns -1 otherwise).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  // -1U in MRSReg/MSRReg/PStateField marks "not valid for this use".
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PState fields whose MSR-immediate form takes only a 1-bit value.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  // All other valid PState fields take a 4-bit immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
1035 
  bool isReg() const override {
    return Kind == k_Register;
  }

  // A plain scalar register (GPR or FP/SIMD scalar), as opposed to a
  // NEON/SVE vector or predicate register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector restricted to the lower half of the register file
  // (V0-V15), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }

  // SVE register membership test; Class selects between data (Z) and
  // predicate (P) register classes.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  // A scalar FP register used where a Z register is expected (the
  // corresponding Z register is substituted in addFPRasZPRRegOperands).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1078 
  // SVE predicate register with a specific element width. NoMatch if not a
  // predicate register at all; NearMatch (for better diagnostics) when it is
  // one but the class/width does not fit.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Same as above, for SVE data (Z) registers.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1123 
  // A 64-bit GPR written where a 32-bit register is expected (and vice
  // versa below); the add*Operands methods translate between the classes.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Even/odd register pairs used by CASP and friends.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Rotation immediate for complex-number instructions: Value must be
  // congruent to Remainder modulo Angle and no greater than 270 degrees.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // A 64-bit GPR with an explicit "lsl #log2(ExtWidth/8)" shift, as used
  // in scaled register-offset addressing.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1174 
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    // NumElements == 0 encodes "no element type was written by the user".
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1183 
1184   template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1185             unsigned ElementWidth>
1186   bool isTypedVectorList() const {
1187     if (Kind != k_VectorList)
1188       return false;
1189     if (VectorList.Count != NumRegs)
1190       return false;
1191     if (VectorList.RegisterKind != VectorKind)
1192       return false;
1193     if (VectorList.ElementWidth != ElementWidth)
1194       return false;
1195     return VectorList.NumElements == NumElements;
1196   }
1197 
  // Vector lane index within [Min, Max]; NearMatch when it is an index but
  // out of range, for a better diagnostic.
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A shifter (as opposed to an extend): LSL/LSR/ASR/ROR/MSL.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1226 
  // True when the FP immediate is bit-for-bit equal to the value described
  // by the ImmEnum entry in the exact-FP-immediate table.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
          APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  // Accept either of two exact FP immediates; propagates the stricter
  // NoMatch/NearMatch result if neither matches.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  // An extend operand (uxtb..sxtx, or lsl as an alias for uxtw/uxtx),
  // with an extend amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1271 
1272   bool isExtend64() const {
1273     if (!isExtend())
1274       return false;
1275     // Make sure the extend expects a 32-bit source register.
1276     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1277     return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1278            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1279            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1280   }
1281 
1282   bool isExtendLSL64() const {
1283     if (!isExtend())
1284       return false;
1285     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1286     return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1287             ET == AArch64_AM::LSL) &&
1288            getShiftExtendAmount() <= 4;
1289   }
1290 
1291   template<int Width> bool isMemXExtend() const {
1292     if (!isExtend())
1293       return false;
1294     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1295     return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1296            (getShiftExtendAmount() == Log2_32(Width / 8) ||
1297             getShiftExtendAmount() == 0);
1298   }
1299 
1300   template<int Width> bool isMemWExtend() const {
1301     if (!isExtend())
1302       return false;
1303     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1304     return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1305            (getShiftExtendAmount() == Log2_32(Width / 8) ||
1306             getShiftExtendAmount() == 0);
1307   }
1308 
1309   template <unsigned width>
1310   bool isArithmeticShifter() const {
1311     if (!isShifter())
1312       return false;
1313 
1314     // An arithmetic shifter is LSL, LSR, or ASR.
1315     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1316     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1317             ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1318   }
1319 
1320   template <unsigned width>
1321   bool isLogicalShifter() const {
1322     if (!isShifter())
1323       return false;
1324 
1325     // A logical shifter is LSL, LSR, ASR or ROR.
1326     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1327     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1328             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1329            getShiftExtendAmount() < width;
1330   }
1331 
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1343 
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1355 
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    // (The LSL check below is already implied by isLogicalVecShifter.)
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1375 
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter (MSL) is a left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1385 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

  // ADRP label: +/- 4GiB from the page-aligned PC, in 4KiB-page steps.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  // ADR label: a signed 21-bit byte offset from the PC.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1427 
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Emit the W register with the same encoding as the parsed X register.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Emit the X register with the same encoding as the parsed W register.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Emit the Z register with the same index as the parsed scalar FP
  // register of the given element width.
  // NOTE(review): unlike its siblings this does not assert N == 1.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  // Emit the D register aliasing the low half of the parsed Q register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1500 
  // Selects the row of FirstRegs below used to translate a vector list's
  // first register into the corresponding tuple register.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Column 0 holds the register the parser records as the list start;
    // columns 1-4 hold the 1..4-register tuple registers based at it.
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  // Emit 0 or 1 selecting which of the two allowed exact FP immediates
  // this operand holds.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1541 
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // addExpr emits a constant operand when the expression folds to a
    // constant, and the expression itself otherwise.
    addExpr(Inst, getImm());
  }

  // Emit (value, shift) for an immediate with an optional "lsl #Shift".
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      // Non-constant shifted expression: emit it symbolically.
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  // As above, but negates the value (used for SUB aliases of ADD etc.).
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  // ADRP encodes a page offset: drop the low 12 bits of a constant.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  // Scaled unsigned 12-bit offset: divide a constant by the access size.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  // Emit the hardware encoding of a logical (bitmask) immediate for the
  // element width of T.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // As above, but encodes the bitwise complement (for BIC-style aliases).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    typename std::make_unsigned<T>::type Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1641   }
1642 
1643   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1644     // Branch operands don't encode the low bits, so shift them off
1645     // here. If it's a label, however, just put it on directly as there's
1646     // not enough information now to do anything.
1647     assert(N == 1 && "Invalid number of operands!");
1648     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1649     if (!MCE) {
1650       addExpr(Inst, getImm());
1651       return;
1652     }
1653     assert(MCE && "Invalid constant immediate operand!");
1654     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1655   }
1656 
1657   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1658     // Branch operands don't encode the low bits, so shift them off
1659     // here. If it's a label, however, just put it on directly as there's
1660     // not enough information now to do anything.
1661     assert(N == 1 && "Invalid number of operands!");
1662     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1663     if (!MCE) {
1664       addExpr(Inst, getImm());
1665       return;
1666     }
1667     assert(MCE && "Invalid constant immediate operand!");
1668     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1669   }
1670 
1671   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1672     // Branch operands don't encode the low bits, so shift them off
1673     // here. If it's a label, however, just put it on directly as there's
1674     // not enough information now to do anything.
1675     assert(N == 1 && "Invalid number of operands!");
1676     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1677     if (!MCE) {
1678       addExpr(Inst, getImm());
1679       return;
1680     }
1681     assert(MCE && "Invalid constant immediate operand!");
1682     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1683   }
1684 
1685   void addFPImmOperands(MCInst &Inst, unsigned N) const {
1686     assert(N == 1 && "Invalid number of operands!");
1687     Inst.addOperand(MCOperand::createImm(
1688         AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1689   }
1690 
1691   void addBarrierOperands(MCInst &Inst, unsigned N) const {
1692     assert(N == 1 && "Invalid number of operands!");
1693     Inst.addOperand(MCOperand::createImm(getBarrier()));
1694   }
1695 
1696   void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1697     assert(N == 1 && "Invalid number of operands!");
1698 
1699     Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1700   }
1701 
1702   void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1703     assert(N == 1 && "Invalid number of operands!");
1704 
1705     Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1706   }
1707 
  // Add the PSTATE-field encoding for MSR-immediate forms taking a 0/1 value.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1713 
  // Add the PSTATE-field encoding for MSR-immediate forms taking a 0-15 value.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1719 
  // Add the parsed cN value of a system-instruction CR operand.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
1724 
  // Add the prefetch-operation encoding as an immediate operand.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
1729 
  // Add the PSB hint encoding as an immediate operand.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1734 
  // Add the BTI hint encoding (already folded into the HINT immediate space
  // by CreateBTIHint) as an immediate operand.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1739 
1740   void addShifterOperands(MCInst &Inst, unsigned N) const {
1741     assert(N == 1 && "Invalid number of operands!");
1742     unsigned Imm =
1743         AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1744     Inst.addOperand(MCOperand::createImm(Imm));
1745   }
1746 
1747   void addExtendOperands(MCInst &Inst, unsigned N) const {
1748     assert(N == 1 && "Invalid number of operands!");
1749     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1750     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1751     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1752     Inst.addOperand(MCOperand::createImm(Imm));
1753   }
1754 
1755   void addExtend64Operands(MCInst &Inst, unsigned N) const {
1756     assert(N == 1 && "Invalid number of operands!");
1757     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1758     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1759     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1760     Inst.addOperand(MCOperand::createImm(Imm));
1761   }
1762 
1763   void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1764     assert(N == 2 && "Invalid number of operands!");
1765     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1766     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1767     Inst.addOperand(MCOperand::createImm(IsSigned));
1768     Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1769   }
1770 
1771   // For 8-bit load/store instructions with a register offset, both the
1772   // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1773   // they're disambiguated by whether the shift was explicit or implicit rather
1774   // than its size.
1775   void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1776     assert(N == 2 && "Invalid number of operands!");
1777     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1778     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1779     Inst.addOperand(MCOperand::createImm(IsSigned));
1780     Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1781   }
1782 
1783   template<int Shift>
1784   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1785     assert(N == 1 && "Invalid number of operands!");
1786 
1787     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1788     uint64_t Value = CE->getValue();
1789     Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1790   }
1791 
1792   template<int Shift>
1793   void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1794     assert(N == 1 && "Invalid number of operands!");
1795 
1796     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1797     uint64_t Value = CE->getValue();
1798     Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1799   }
1800 
  // Encode an "even" complex rotation (0/90/180/270 degrees) as its 2-bit
  // field: degrees / 90.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
1806 
  // Encode an "odd" complex rotation (90/270 degrees) as its 1-bit field:
  // 90 -> 0, 270 -> 1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
1812 
1813   void print(raw_ostream &OS) const override;
1814 
1815   static std::unique_ptr<AArch64Operand>
1816   CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1817     auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1818     Op->Tok.Data = Str.data();
1819     Op->Tok.Length = Str.size();
1820     Op->Tok.IsSuffix = IsSuffix;
1821     Op->StartLoc = S;
1822     Op->EndLoc = S;
1823     return Op;
1824   }
1825 
  // Create a register operand. EqTy records how this register must relate to
  // other registers in the instruction (register-constraint checking), and
  // the optional ExtTy/ShiftAmount record a parsed modifier such as "lsl #N".
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0; // Scalar registers have no element width.
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1844 
  // Create a vector-register operand (NEON, or SVE data/predicate), recording
  // the element width implied by the parsed type suffix.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector) &&
           "Invalid vector kind");
    // Delegate the common register setup, then record the element width.
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
1859 
  // Create an operand for a list of Count vector registers starting at RegNum,
  // with the given per-register element layout.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
                   MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1874 
  // Create a vector lane-index operand.
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1883 
  // Create an immediate operand from an (as yet unevaluated) expression.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1892 
1893   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1894                                                           unsigned ShiftAmount,
1895                                                           SMLoc S, SMLoc E,
1896                                                           MCContext &Ctx) {
1897     auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1898     Op->ShiftedImm .Val = Val;
1899     Op->ShiftedImm.ShiftAmount = ShiftAmount;
1900     Op->StartLoc = S;
1901     Op->EndLoc = E;
1902     return Op;
1903   }
1904 
  // Create a condition-code operand (eq, ne, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1913 
  // Create a floating-point immediate operand. The value is stored as the raw
  // bit pattern (sign-extended to int64); IsExact records whether the parsed
  // text converted to this value without rounding.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1923 
  // Create a barrier operand; Str is the spelled name (may be empty when the
  // barrier was given numerically) and is referenced, not copied.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1936 
  // Create a system-register operand carrying the separate MRS/MSR encodings
  // and the PSTATE-field encoding; whichever applies is selected later by the
  // addXXXOperands methods.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1952 
  // Create a system-instruction CR operand (the N of "cN").
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1961 
1962   static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1963                                                         StringRef Str,
1964                                                         SMLoc S,
1965                                                         MCContext &Ctx) {
1966     auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1967     Op->Prefetch.Val = Val;
1968     Op->Barrier.Data = Str.data();
1969     Op->Barrier.Length = Str.size();
1970     Op->StartLoc = S;
1971     Op->EndLoc = S;
1972     return Op;
1973   }
1974 
  // Create a PSB hint operand; Str is the spelled hint name, referenced,
  // not copied.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1987 
  // Create a BTI hint operand; Str is the spelled target name, referenced,
  // not copied.
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
    // (Val << 1 | 32): fold the looked-up BTI encoding into the HINT
    // immediate space — presumably the HINT #32..38 aliases for BTI; confirm
    // against AArch64SystemOperands.td.
    Op->BTIHint.Val = Val << 1 | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2000 
  // Create a stand-alone shift/extend operand (e.g. "lsl #3", "uxtw #2").
  // HasExplicitAmount distinguishes a written "#0" from an implied zero.
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2012 };
2013 
2014 } // end anonymous namespace.
2015 
2016 void AArch64Operand::print(raw_ostream &OS) const {
2017   switch (Kind) {
2018   case k_FPImm:
2019     OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2020     if (!getFPImmIsExact())
2021       OS << " (inexact)";
2022     OS << ">";
2023     break;
2024   case k_Barrier: {
2025     StringRef Name = getBarrierName();
2026     if (!Name.empty())
2027       OS << "<barrier " << Name << ">";
2028     else
2029       OS << "<barrier invalid #" << getBarrier() << ">";
2030     break;
2031   }
2032   case k_Immediate:
2033     OS << *getImm();
2034     break;
2035   case k_ShiftedImm: {
2036     unsigned Shift = getShiftedImmShift();
2037     OS << "<shiftedimm ";
2038     OS << *getShiftedImmVal();
2039     OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2040     break;
2041   }
2042   case k_CondCode:
2043     OS << "<condcode " << getCondCode() << ">";
2044     break;
2045   case k_VectorList: {
2046     OS << "<vectorlist ";
2047     unsigned Reg = getVectorListStart();
2048     for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2049       OS << Reg + i << " ";
2050     OS << ">";
2051     break;
2052   }
2053   case k_VectorIndex:
2054     OS << "<vectorindex " << getVectorIndex() << ">";
2055     break;
2056   case k_SysReg:
2057     OS << "<sysreg: " << getSysReg() << '>';
2058     break;
2059   case k_Token:
2060     OS << "'" << getToken() << "'";
2061     break;
2062   case k_SysCR:
2063     OS << "c" << getSysCR();
2064     break;
2065   case k_Prefetch: {
2066     StringRef Name = getPrefetchName();
2067     if (!Name.empty())
2068       OS << "<prfop " << Name << ">";
2069     else
2070       OS << "<prfop invalid #" << getPrefetch() << ">";
2071     break;
2072   }
2073   case k_PSBHint:
2074     OS << getPSBHintName();
2075     break;
2076   case k_Register:
2077     OS << "<register " << getReg() << ">";
2078     if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2079       break;
2080     LLVM_FALLTHROUGH;
2081   case k_BTIHint:
2082     OS << getBTIHintName();
2083     break;
2084   case k_ShiftExtend:
2085     OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2086        << getShiftExtendAmount();
2087     if (!hasShiftExtendAmount())
2088       OS << "<imp>";
2089     OS << '>';
2090     break;
2091   }
2092 }
2093 
2094 /// @name Auto-generated Match Functions
2095 /// {
2096 
2097 static unsigned MatchRegisterName(StringRef Name);
2098 
2099 /// }
2100 
// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
// corresponding Qn register number, or return 0 if it is not one.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2137 
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  // {-1, -1} is the "unrecognized suffix" sentinel checked at the end.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1})/;
    break;
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
    // SVE suffixes only specify element width; element count is implicit
    // (scalable), so it is always 0 here.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);
}
2194 
2195 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2196   return parseVectorKind(Suffix, VectorKind).hasValue();
2197 }
2198 
// Map an SVE data-vector register name ("z0".."z31", case-insensitive) to
// the corresponding Zn register number, or return 0 if it is not one.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2235 
// Map an SVE predicate register name ("p0".."p15", case-insensitive) to the
// corresponding Pn register number, or return 0 if it is not one.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2256 
// Generic MCTargetAsmParser entry point for parsing a (scalar) register.
// Returns true on failure. EndLoc points at the last character consumed.
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                     SMLoc &EndLoc) {
  StartLoc = getLoc();
  auto Res = tryParseScalarRegister(RegNo);
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return Res != MatchOperand_Success; // true == error for this interface.
}
2264 
2265 // Matches a register name or register alias previously defined by '.req'
2266 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2267                                                   RegKind Kind) {
2268   unsigned RegNum = 0;
2269   if ((RegNum = matchSVEDataVectorRegName(Name)))
2270     return Kind == RegKind::SVEDataVector ? RegNum : 0;
2271 
2272   if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2273     return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2274 
2275   if ((RegNum = MatchNeonVectorRegName(Name)))
2276     return Kind == RegKind::NeonVector ? RegNum : 0;
2277 
2278   // The parsed register must be of RegKind Scalar
2279   if ((RegNum = MatchRegisterName(Name)))
2280     return Kind == RegKind::Scalar ? RegNum : 0;
2281 
2282   if (!RegNum) {
2283     // Handle a few common aliases of registers.
2284     if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2285                     .Case("fp", AArch64::FP)
2286                     .Case("lr",  AArch64::LR)
2287                     .Case("x31", AArch64::XZR)
2288                     .Case("w31", AArch64::WZR)
2289                     .Default(0))
2290       return Kind == RegKind::Scalar ? RegNum : 0;
2291 
2292     // Check for aliases registered via .req. Canonicalize to lower case.
2293     // That's more consistent since register names are case insensitive, and
2294     // it's how the original entry was passed in from MC/MCParser/AsmParser.
2295     auto Entry = RegisterReqs.find(Name.lower());
2296     if (Entry == RegisterReqs.end())
2297       return 0;
2298 
2299     // set RegNum if the match is the right kind of register
2300     if (Kind == Entry->getValue().first)
2301       RegNum = Entry->getValue().second;
2302   }
2303   return RegNum;
2304 }
2305 
/// tryParseScalarRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list.
OperandMatchResultTy
AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // Only an identifier can possibly be a register name.
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Register names are case-insensitive; canonicalize before matching.
  std::string lowerCase = Tok.getString().lower();
  unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
  if (Reg == 0)
    return MatchOperand_NoMatch;

  RegNum = Reg;
  Parser.Lex(); // Eat identifier token.
  return MatchOperand_Success;
}
2325 
2326 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2327 OperandMatchResultTy
2328 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2329   MCAsmParser &Parser = getParser();
2330   SMLoc S = getLoc();
2331 
2332   if (Parser.getTok().isNot(AsmToken::Identifier)) {
2333     Error(S, "Expected cN operand where 0 <= N <= 15");
2334     return MatchOperand_ParseFail;
2335   }
2336 
2337   StringRef Tok = Parser.getTok().getIdentifier();
2338   if (Tok[0] != 'c' && Tok[0] != 'C') {
2339     Error(S, "Expected cN operand where 0 <= N <= 15");
2340     return MatchOperand_ParseFail;
2341   }
2342 
2343   uint32_t CRNum;
2344   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2345   if (BadNum || CRNum > 15) {
2346     Error(S, "Expected cN operand where 0 <= N <= 15");
2347     return MatchOperand_ParseFail;
2348   }
2349 
2350   Parser.Lex(); // Eat identifier token.
2351   Operands.push_back(
2352       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2353   return MatchOperand_Success;
2354 }
2355 
2356 /// tryParsePrefetch - Try to parse a prefetch operand.
2357 template <bool IsSVEPrefetch>
2358 OperandMatchResultTy
2359 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2360   MCAsmParser &Parser = getParser();
2361   SMLoc S = getLoc();
2362   const AsmToken &Tok = Parser.getTok();
2363 
2364   auto LookupByName = [](StringRef N) {
2365     if (IsSVEPrefetch) {
2366       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2367         return Optional<unsigned>(Res->Encoding);
2368     } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2369       return Optional<unsigned>(Res->Encoding);
2370     return Optional<unsigned>();
2371   };
2372 
2373   auto LookupByEncoding = [](unsigned E) {
2374     if (IsSVEPrefetch) {
2375       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2376         return Optional<StringRef>(Res->Name);
2377     } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2378       return Optional<StringRef>(Res->Name);
2379     return Optional<StringRef>();
2380   };
2381   unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2382 
2383   // Either an identifier for named values or a 5-bit immediate.
2384   // Eat optional hash.
2385   if (parseOptionalToken(AsmToken::Hash) ||
2386       Tok.is(AsmToken::Integer)) {
2387     const MCExpr *ImmVal;
2388     if (getParser().parseExpression(ImmVal))
2389       return MatchOperand_ParseFail;
2390 
2391     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2392     if (!MCE) {
2393       TokError("immediate value expected for prefetch operand");
2394       return MatchOperand_ParseFail;
2395     }
2396     unsigned prfop = MCE->getValue();
2397     if (prfop > MaxVal) {
2398       TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2399                "] expected");
2400       return MatchOperand_ParseFail;
2401     }
2402 
2403     auto PRFM = LookupByEncoding(MCE->getValue());
2404     Operands.push_back(AArch64Operand::CreatePrefetch(
2405         prfop, PRFM.getValueOr(""), S, getContext()));
2406     return MatchOperand_Success;
2407   }
2408 
2409   if (Tok.isNot(AsmToken::Identifier)) {
2410     TokError("prefetch hint expected");
2411     return MatchOperand_ParseFail;
2412   }
2413 
2414   auto PRFM = LookupByName(Tok.getString());
2415   if (!PRFM) {
2416     TokError("prefetch hint expected");
2417     return MatchOperand_ParseFail;
2418   }
2419 
2420   Parser.Lex(); // Eat identifier token.
2421   Operands.push_back(AArch64Operand::CreatePrefetch(
2422       *PRFM, Tok.getString(), S, getContext()));
2423   return MatchOperand_Success;
2424 }
2425 
2426 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2427 OperandMatchResultTy
2428 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2429   MCAsmParser &Parser = getParser();
2430   SMLoc S = getLoc();
2431   const AsmToken &Tok = Parser.getTok();
2432   if (Tok.isNot(AsmToken::Identifier)) {
2433     TokError("invalid operand for instruction");
2434     return MatchOperand_ParseFail;
2435   }
2436 
2437   auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2438   if (!PSB) {
2439     TokError("invalid operand for instruction");
2440     return MatchOperand_ParseFail;
2441   }
2442 
2443   Parser.Lex(); // Eat identifier token.
2444   Operands.push_back(AArch64Operand::CreatePSBHint(
2445       PSB->Encoding, Tok.getString(), S, getContext()));
2446   return MatchOperand_Success;
2447 }
2448 
2449 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2450 OperandMatchResultTy
2451 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2452   MCAsmParser &Parser = getParser();
2453   SMLoc S = getLoc();
2454   const AsmToken &Tok = Parser.getTok();
2455   if (Tok.isNot(AsmToken::Identifier)) {
2456     TokError("invalid operand for instruction");
2457     return MatchOperand_ParseFail;
2458   }
2459 
2460   auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2461   if (!BTI) {
2462     TokError("invalid operand for instruction");
2463     return MatchOperand_ParseFail;
2464   }
2465 
2466   Parser.Lex(); // Eat identifier token.
2467   Operands.push_back(AArch64Operand::CreateBTIHint(
2468       BTI->Encoding, Tok.getString(), S, getContext()));
2469   return MatchOperand_Success;
2470 }
2471 
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An optional '#' may precede the label/immediate.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify any symbol modifier (":got:", "@page", ...) and check it is one
  // that makes sense on an ADRP.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2523 
2524 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2525 /// instruction.
2526 OperandMatchResultTy
2527 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2528   SMLoc S = getLoc();
2529   const MCExpr *Expr = nullptr;
2530 
2531   // Leave anything with a bracket to the default for SVE
2532   if (getParser().getTok().is(AsmToken::LBrac))
2533     return MatchOperand_NoMatch;
2534 
2535   if (getParser().getTok().is(AsmToken::Hash))
2536     getParser().Lex(); // Eat hash token.
2537 
2538   if (parseSymbolicImmVal(Expr))
2539     return MatchOperand_ParseFail;
2540 
2541   AArch64MCExpr::VariantKind ELFRefKind;
2542   MCSymbolRefExpr::VariantKind DarwinRefKind;
2543   int64_t Addend;
2544   if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2545     if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2546         ELFRefKind == AArch64MCExpr::VK_INVALID) {
2547       // No modifier was specified at all; this is the syntax for an ELF basic
2548       // ADR relocation (unfortunately).
2549       Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2550     } else {
2551       Error(S, "unexpected adr label");
2552       return MatchOperand_ParseFail;
2553     }
2554   }
2555 
2556   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2557   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2558   return MatchOperand_Success;
2559 }
2560 
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts either a raw 8-bit encoded value written as a hex literal
/// ("#0x..", at most 255) or a textual FP literal ("#1.5", "-0.0", ...),
/// each optionally preceded by '#' and/or '-'.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this simply isn't an FP immediate; with one it
    // must be, so anything else is a hard error.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // A hex literal is the already-encoded 8-bit value; it must fit in a
    // byte and negating an encoded value makes no sense.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto Status =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Represent +0.0 as the literal tokens "#0" and ".0" (template
      // parameter controls this) instead of an FP immediate operand.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
    } else
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, Status == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2613 
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
OperandMatchResultTy
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No ',' follows, so there is no shift suffix: emit a plain immediate.
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
    return MatchOperand_Success;
  }

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2677 
2678 /// parseCondCodeString - Parse a Condition Code string.
2679 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2680   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2681                     .Case("eq", AArch64CC::EQ)
2682                     .Case("ne", AArch64CC::NE)
2683                     .Case("cs", AArch64CC::HS)
2684                     .Case("hs", AArch64CC::HS)
2685                     .Case("cc", AArch64CC::LO)
2686                     .Case("lo", AArch64CC::LO)
2687                     .Case("mi", AArch64CC::MI)
2688                     .Case("pl", AArch64CC::PL)
2689                     .Case("vs", AArch64CC::VS)
2690                     .Case("vc", AArch64CC::VC)
2691                     .Case("hi", AArch64CC::HI)
2692                     .Case("ls", AArch64CC::LS)
2693                     .Case("ge", AArch64CC::GE)
2694                     .Case("lt", AArch64CC::LT)
2695                     .Case("gt", AArch64CC::GT)
2696                     .Case("le", AArch64CC::LE)
2697                     .Case("al", AArch64CC::AL)
2698                     .Case("nv", AArch64CC::NV)
2699                     .Default(AArch64CC::Invalid);
2700 
2701   if (CC == AArch64CC::Invalid &&
2702       getSTI().getFeatureBits()[AArch64::FeatureSVE])
2703     CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2704                     .Case("none",  AArch64CC::EQ)
2705                     .Case("any",   AArch64CC::NE)
2706                     .Case("nlast", AArch64CC::HS)
2707                     .Case("last",  AArch64CC::LO)
2708                     .Case("first", AArch64CC::MI)
2709                     .Case("nfrst", AArch64CC::PL)
2710                     .Case("pmore", AArch64CC::HI)
2711                     .Case("plast", AArch64CC::LS)
2712                     .Case("tcont", AArch64CC::GE)
2713                     .Case("tstop", AArch64CC::LT)
2714                     .Default(AArch64CC::Invalid);
2715 
2716   return CC;
2717 }
2718 
2719 /// parseCondCode - Parse a Condition Code operand.
2720 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2721                                      bool invertCondCode) {
2722   MCAsmParser &Parser = getParser();
2723   SMLoc S = getLoc();
2724   const AsmToken &Tok = Parser.getTok();
2725   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2726 
2727   StringRef Cond = Tok.getString();
2728   AArch64CC::CondCode CC = parseCondCodeString(Cond);
2729   if (CC == AArch64CC::Invalid)
2730     return TokError("invalid condition code");
2731   Parser.Lex(); // Eat identifier token.
2732 
2733   if (invertCondCode) {
2734     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2735       return TokError("condition codes AL and NV are invalid for this instruction");
2736     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2737   }
2738 
2739   Operands.push_back(
2740       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2741   return false;
2742 }
2743 
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
///
/// Accepts both shift operators (lsl/lsr/asr/ror/msl), which require an
/// immediate amount, and extend operators (uxtb..sxtx), for which the
/// amount is optional and defaults to an implicit #0.
OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // No '#' and no integer follows the operator.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen) &&
      !Parser.getTok().is(AsmToken::Identifier)) {
    Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The shift amount must fold to a constant at parse time.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2817 
// Table mapping an architecture-extension name to the subtarget features it
// implies.  Used (e.g. by setRequiredFeatureString below) to name the
// extension responsible for a missing-feature diagnostic.  An empty feature
// set marks a name that is recognized but not yet supported.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    // FIXME: Unsupported extensions
    {"pan", {}},
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
2850 
2851 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2852   if (FBS[AArch64::HasV8_1aOps])
2853     Str += "ARMv8.1a";
2854   else if (FBS[AArch64::HasV8_2aOps])
2855     Str += "ARMv8.2a";
2856   else if (FBS[AArch64::HasV8_3aOps])
2857     Str += "ARMv8.3a";
2858   else if (FBS[AArch64::HasV8_4aOps])
2859     Str += "ARMv8.4a";
2860   else if (FBS[AArch64::HasV8_5aOps])
2861     Str += "ARMv8.5a";
2862   else {
2863     auto ext = std::find_if(std::begin(ExtensionMap),
2864       std::end(ExtensionMap),
2865       [&](const Extension& e)
2866       // Use & in case multiple features are enabled
2867       { return (FBS & e.Features) != FeatureBitset(); }
2868     );
2869 
2870     Str += ext != std::end(ExtensionMap) ? ext->Name : "(unknown)";
2871   }
2872 }
2873 
2874 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2875                                       SMLoc S) {
2876   const uint16_t Op2 = Encoding & 7;
2877   const uint16_t Cm = (Encoding & 0x78) >> 3;
2878   const uint16_t Cn = (Encoding & 0x780) >> 7;
2879   const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2880 
2881   const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2882 
2883   Operands.push_back(
2884       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2885   Operands.push_back(
2886       AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2887   Operands.push_back(
2888       AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2889   Expr = MCConstantExpr::create(Op2, getContext());
2890   Operands.push_back(
2891       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2892 }
2893 
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Looks the operand name up in the per-mnemonic tables, checks feature
/// availability, expands the encoding via createSysAlias, and then parses
/// the optional trailing register operand (required for non-"all" ops).
/// Returns true (with a diagnostic) on failure.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // All of these aliases lower to a "sys" instruction.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The PRCTX table stores only the upper bits; op2 distinguishes the
    // three prediction-restriction mnemonics.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" operate on everything and take no register.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
2991 
// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either a
// #imm in the range [0, 15] or a named barrier option.  ISB only accepts
// the name 'sy' and TSB only accepts 'csync'.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  // Can be either a #imm style literal or an option name
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical option name, if the encoding has one.
    auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreateBarrier(
        MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
  // The only valid named option for ISB is 'sy'
  auto DB = AArch64DB::lookupDBByName(Tok.getString());
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
3048 
3049 OperandMatchResultTy
3050 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3051   MCAsmParser &Parser = getParser();
3052   const AsmToken &Tok = Parser.getTok();
3053 
3054   if (Tok.isNot(AsmToken::Identifier))
3055     return MatchOperand_NoMatch;
3056 
3057   int MRSReg, MSRReg;
3058   auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3059   if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3060     MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3061     MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3062   } else
3063     MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3064 
3065   auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3066   unsigned PStateImm = -1;
3067   if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3068     PStateImm = PState->Encoding;
3069 
3070   Operands.push_back(
3071       AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3072                                    PStateImm, getContext()));
3073   Parser.Lex(); // Eat identifier
3074 
3075   return MatchOperand_Success;
3076 }
3077 
/// tryParseNeonVectorRegister - Parse a vector register operand.
///
/// Note the bool convention here: returns true on failure/no-match and
/// false on success (unlike the OperandMatchResultTy parsers).
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // An optional lane index ("[N]") may follow; only a hard parse failure
  // there makes this a failure.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3110 
3111 OperandMatchResultTy
3112 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3113   SMLoc SIdx = getLoc();
3114   if (parseOptionalToken(AsmToken::LBrac)) {
3115     const MCExpr *ImmVal;
3116     if (getParser().parseExpression(ImmVal))
3117       return MatchOperand_NoMatch;
3118     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3119     if (!MCE) {
3120       TokError("immediate value expected for vector index");
3121       return MatchOperand_ParseFail;;
3122     }
3123 
3124     SMLoc E = getLoc();
3125 
3126     if (parseToken(AsmToken::RBrac, "']' expected"))
3127       return MatchOperand_ParseFail;;
3128 
3129     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3130                                                          E, getContext()));
3131     return MatchOperand_Success;
3132   }
3133 
3134   return MatchOperand_NoMatch;
3135 }
3136 
3137 // tryParseVectorRegister - Try to parse a vector register name with
3138 // optional kind specifier. If it is a register specifier, eat the token
3139 // and return it.
3140 OperandMatchResultTy
3141 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3142                                          RegKind MatchKind) {
3143   MCAsmParser &Parser = getParser();
3144   const AsmToken &Tok = Parser.getTok();
3145 
3146   if (Tok.isNot(AsmToken::Identifier))
3147     return MatchOperand_NoMatch;
3148 
3149   StringRef Name = Tok.getString();
3150   // If there is a kind specifier, it's separated from the register name by
3151   // a '.'.
3152   size_t Start = 0, Next = Name.find('.');
3153   StringRef Head = Name.slice(Start, Next);
3154   unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3155 
3156   if (RegNum) {
3157     if (Next != StringRef::npos) {
3158       Kind = Name.slice(Next, StringRef::npos);
3159       if (!isValidVectorKind(Kind, MatchKind)) {
3160         TokError("invalid vector kind qualifier");
3161         return MatchOperand_ParseFail;
3162       }
3163     }
3164     Parser.Lex(); // Eat the register token.
3165 
3166     Reg = RegNum;
3167     return MatchOperand_Success;
3168   }
3169 
3170   return MatchOperand_NoMatch;
3171 }
3172 
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
///
/// Also handles the optional "/m" (merging) or "/z" (zeroing) predication
/// suffix, which is emitted as separate "/" and "m"/"z" token operands.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(
      AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(
    AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3225 
3226 /// parseRegister - Parse a register operand.
3227 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3228   // Try for a Neon vector register.
3229   if (!tryParseNeonVectorRegister(Operands))
3230     return false;
3231 
3232   // Otherwise try for a scalar register.
3233   if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3234     return false;
3235 
3236   return true;
3237 }
3238 
// Parse an immediate expression that may be prefixed by an ELF relocation
// specifier of the form ":<specifier>:<expr>" (e.g. ":lo12:sym").  On
// success ImmVal holds the parsed expression, wrapped in an AArch64MCExpr
// carrying the relocation kind when a specifier was present.  Returns true
// (with a diagnostic) on failure.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation kind travels with it.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3309 
/// Parse a vector-register list operand such as "{ v0.8b, v1.8b }" or
/// "{ z0.d - z3.d }".  On entry the lexer must be positioned at '{'.
///
/// \tparam VectorKind   Register kind (e.g. NeonVector, SVEDataVector) every
///                      element of the list must belong to.
/// \param ExpectMatch   When true, failing to parse a vector register after
///                      '{' is a hard error; when false it yields NoMatch so
///                      another list flavour can be tried.
/// \return MatchOperand_Success and pushes a vector-list operand, or
///         NoMatch / ParseFail per the usual custom-parser conventions.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      // A successfully parsed register must carry a kind suffix this
      // template's VectorKind recognises.
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // Diagnose only when the token could plausibly have been a register, or
    // when the caller insists a register must be present here.
    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // Range syntax: "{ v0.8b - v3.8b }".
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Distance between first and last register, wrapping modulo 32.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Comma-separated syntax: "{ v0.8b, v1.8b, ... }".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Hardware lists hold at most four vectors.
  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  // Recover the element count/width from the suffix (if any) for the operand.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
3430 
3431 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
3432 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3433   auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3434   if (ParseRes != MatchOperand_Success)
3435     return true;
3436 
3437   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3438 }
3439 
3440 OperandMatchResultTy
3441 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3442   SMLoc StartLoc = getLoc();
3443 
3444   unsigned RegNum;
3445   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3446   if (Res != MatchOperand_Success)
3447     return Res;
3448 
3449   if (!parseOptionalToken(AsmToken::Comma)) {
3450     Operands.push_back(AArch64Operand::CreateReg(
3451         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3452     return MatchOperand_Success;
3453   }
3454 
3455   parseOptionalToken(AsmToken::Hash);
3456 
3457   if (getParser().getTok().isNot(AsmToken::Integer)) {
3458     Error(getLoc(), "index must be absent or #0");
3459     return MatchOperand_ParseFail;
3460   }
3461 
3462   const MCExpr *ImmVal;
3463   if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3464       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3465     Error(getLoc(), "index must be absent or #0");
3466     return MatchOperand_ParseFail;
3467   }
3468 
3469   Operands.push_back(AArch64Operand::CreateReg(
3470       RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3471   return MatchOperand_Success;
3472 }
3473 
3474 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3475 OperandMatchResultTy
3476 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3477   SMLoc StartLoc = getLoc();
3478 
3479   unsigned RegNum;
3480   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3481   if (Res != MatchOperand_Success)
3482     return Res;
3483 
3484   // No shift/extend is the default.
3485   if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3486     Operands.push_back(AArch64Operand::CreateReg(
3487         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3488     return MatchOperand_Success;
3489   }
3490 
3491   // Eat the comma
3492   getParser().Lex();
3493 
3494   // Match the shift
3495   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3496   Res = tryParseOptionalShiftExtend(ExtOpnd);
3497   if (Res != MatchOperand_Success)
3498     return Res;
3499 
3500   auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3501   Operands.push_back(AArch64Operand::CreateReg(
3502       RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3503       Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3504       Ext->hasShiftExtendAmount()));
3505 
3506   return MatchOperand_Success;
3507 }
3508 
3509 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3510   MCAsmParser &Parser = getParser();
3511 
3512   // Some SVE instructions have a decoration after the immediate, i.e.
3513   // "mul vl". We parse them here and add tokens, which must be present in the
3514   // asm string in the tablegen instruction.
3515   bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3516   bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3517   if (!Parser.getTok().getString().equals_lower("mul") ||
3518       !(NextIsVL || NextIsHash))
3519     return true;
3520 
3521   Operands.push_back(
3522     AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3523   Parser.Lex(); // Eat the "mul"
3524 
3525   if (NextIsVL) {
3526     Operands.push_back(
3527         AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3528     Parser.Lex(); // Eat the "vl"
3529     return false;
3530   }
3531 
3532   if (NextIsHash) {
3533     Parser.Lex(); // Eat the #
3534     SMLoc S = getLoc();
3535 
3536     // Parse immediate operand.
3537     const MCExpr *ImmVal;
3538     if (!Parser.parseExpression(ImmVal))
3539       if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3540         Operands.push_back(AArch64Operand::CreateImm(
3541             MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3542             getContext()));
3543         return MatchOperand_Success;
3544       }
3545   }
3546 
3547   return Error(getLoc(), "expected 'vl' or '#<imm>'");
3548 }
3549 
3550 /// parseOperand - Parse a arm instruction operand.  For now this parses the
3551 /// operand regardless of the mnemonic.
3552 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3553                                   bool invertCondCode) {
3554   MCAsmParser &Parser = getParser();
3555 
3556   OperandMatchResultTy ResTy =
3557       MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3558 
3559   // Check if the current operand has a custom associated parser, if so, try to
3560   // custom parse the operand, or fallback to the general approach.
3561   if (ResTy == MatchOperand_Success)
3562     return false;
3563   // If there wasn't a custom match, try the generic matcher below. Otherwise,
3564   // there was a match, but an error occurred, in which case, just return that
3565   // the operand parsing failed.
3566   if (ResTy == MatchOperand_ParseFail)
3567     return true;
3568 
3569   // Nothing custom, so do general case parsing.
3570   SMLoc S, E;
3571   switch (getLexer().getKind()) {
3572   default: {
3573     SMLoc S = getLoc();
3574     const MCExpr *Expr;
3575     if (parseSymbolicImmVal(Expr))
3576       return Error(S, "invalid operand");
3577 
3578     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3579     Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3580     return false;
3581   }
3582   case AsmToken::LBrac: {
3583     SMLoc Loc = Parser.getTok().getLoc();
3584     Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3585                                                    getContext()));
3586     Parser.Lex(); // Eat '['
3587 
3588     // There's no comma after a '[', so we can parse the next operand
3589     // immediately.
3590     return parseOperand(Operands, false, false);
3591   }
3592   case AsmToken::LCurly:
3593     return parseNeonVectorList(Operands);
3594   case AsmToken::Identifier: {
3595     // If we're expecting a Condition Code operand, then just parse that.
3596     if (isCondCode)
3597       return parseCondCode(Operands, invertCondCode);
3598 
3599     // If it's a register name, parse it.
3600     if (!parseRegister(Operands))
3601       return false;
3602 
3603     // See if this is a "mul vl" decoration or "mul #<int>" operand used
3604     // by SVE instructions.
3605     if (!parseOptionalMulOperand(Operands))
3606       return false;
3607 
3608     // This could be an optional "shift" or "extend" operand.
3609     OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3610     // We can only continue if no tokens were eaten.
3611     if (GotShift != MatchOperand_NoMatch)
3612       return GotShift;
3613 
3614     // This was not a register so parse other operands that start with an
3615     // identifier (like labels) as expressions and create them as immediates.
3616     const MCExpr *IdVal;
3617     S = getLoc();
3618     if (getParser().parseExpression(IdVal))
3619       return true;
3620     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3621     Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3622     return false;
3623   }
3624   case AsmToken::Integer:
3625   case AsmToken::Real:
3626   case AsmToken::Hash: {
3627     // #42 -> immediate.
3628     S = getLoc();
3629 
3630     parseOptionalToken(AsmToken::Hash);
3631 
3632     // Parse a negative sign
3633     bool isNegative = false;
3634     if (Parser.getTok().is(AsmToken::Minus)) {
3635       isNegative = true;
3636       // We need to consume this token only when we have a Real, otherwise
3637       // we let parseSymbolicImmVal take care of it
3638       if (Parser.getLexer().peekTok().is(AsmToken::Real))
3639         Parser.Lex();
3640     }
3641 
3642     // The only Real that should come through here is a literal #0.0 for
3643     // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3644     // so convert the value.
3645     const AsmToken &Tok = Parser.getTok();
3646     if (Tok.is(AsmToken::Real)) {
3647       APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3648       uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3649       if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3650           Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3651           Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3652         return TokError("unexpected floating point literal");
3653       else if (IntVal != 0 || isNegative)
3654         return TokError("expected floating-point constant #0.0");
3655       Parser.Lex(); // Eat the token.
3656 
3657       Operands.push_back(
3658           AArch64Operand::CreateToken("#0", false, S, getContext()));
3659       Operands.push_back(
3660           AArch64Operand::CreateToken(".0", false, S, getContext()));
3661       return false;
3662     }
3663 
3664     const MCExpr *ImmVal;
3665     if (parseSymbolicImmVal(ImmVal))
3666       return true;
3667 
3668     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3669     Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3670     return false;
3671   }
3672   case AsmToken::Equal: {
3673     SMLoc Loc = getLoc();
3674     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3675       return TokError("unexpected token in operand");
3676     Parser.Lex(); // Eat '='
3677     const MCExpr *SubExprVal;
3678     if (getParser().parseExpression(SubExprVal))
3679       return true;
3680 
3681     if (Operands.size() < 2 ||
3682         !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3683       return Error(Loc, "Only valid when first operand is register");
3684 
3685     bool IsXReg =
3686         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3687             Operands[1]->getReg());
3688 
3689     MCContext& Ctx = getContext();
3690     E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3691     // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3692     if (isa<MCConstantExpr>(SubExprVal)) {
3693       uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3694       uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3695       while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3696         ShiftAmt += 16;
3697         Imm >>= 16;
3698       }
3699       if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3700           Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3701           Operands.push_back(AArch64Operand::CreateImm(
3702                      MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3703         if (ShiftAmt)
3704           Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3705                      ShiftAmt, true, S, E, Ctx));
3706         return false;
3707       }
3708       APInt Simm = APInt(64, Imm << ShiftAmt);
3709       // check if the immediate is an unsigned or signed 32-bit int for W regs
3710       if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3711         return Error(Loc, "Immediate too large for register");
3712     }
3713     // If it is a label or an imm that cannot fit in a movz, put it into CP.
3714     const MCExpr *CPLoc =
3715         getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3716     Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3717     return false;
3718   }
3719   }
3720 }
3721 
3722 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3723                                  const MCParsedAsmOperand &Op2) const {
3724   auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3725   auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
3726   if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3727       AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3728     return MCTargetAsmParser::regsEqual(Op1, Op2);
3729 
3730   assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3731          "Testing equality of non-scalar registers not supported");
3732 
3733   // Check if a registers match their sub/super register classes.
3734   if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3735     return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3736   if (AOp1.getRegEqualityTy() == EqualsSubReg)
3737     return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3738   if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3739     return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3740   if (AOp2.getRegEqualityTy() == EqualsSubReg)
3741     return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3742 
3743   return false;
3744 }
3745 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Splits the mnemonic on '.' into leading tokens, handles the ".req"
/// directive, sys-instruction aliases, and conditional-branch suffixes, then
/// parses the comma-separated operand list.  Returns true on failure.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize legacy one-token conditional branches ("beq") to the
  // dotted form ("b.eq") so the rest of the splitting logic applies.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point the suffix diagnostic location at the suffix within the
    // original mnemonic text.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand.  N tracks the 1-based operand
      // position so the condition-code flags above can be applied to the
      // right operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3881 
3882 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3883   assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3884   return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3885          (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3886          (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3887          (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3888          (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3889          (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3890 }
3891 
3892 // FIXME: This entire function is a giant hack to provide us with decent
3893 // operand range validation/diagnostics until TableGen/MC can be extended
3894 // to support autogeneration of this kind of validation.
3895 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3896                                            SmallVectorImpl<SMLoc> &Loc) {
3897   const MCRegisterInfo *RI = getContext().getRegisterInfo();
3898   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3899 
3900   // A prefix only applies to the instruction following it.  Here we extract
3901   // prefix information for the next instruction before validating the current
3902   // one so that in the case of failure we don't erronously continue using the
3903   // current prefix.
3904   PrefixInfo Prefix = NextPrefix;
3905   NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3906 
3907   // Before validating the instruction in isolation we run through the rules
3908   // applicable when it follows a prefix instruction.
3909   // NOTE: brk & hlt can be prefixed but require no additional validation.
3910   if (Prefix.isActive() &&
3911       (Inst.getOpcode() != AArch64::BRK) &&
3912       (Inst.getOpcode() != AArch64::HLT)) {
3913 
3914     // Prefixed intructions must have a destructive operand.
3915     if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3916         AArch64::NotDestructive)
3917       return Error(IDLoc, "instruction is unpredictable when following a"
3918                    " movprfx, suggest replacing movprfx with mov");
3919 
3920     // Destination operands must match.
3921     if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3922       return Error(Loc[0], "instruction is unpredictable when following a"
3923                    " movprfx writing to a different destination");
3924 
3925     // Destination operand must not be used in any other location.
3926     for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3927       if (Inst.getOperand(i).isReg() &&
3928           (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3929           isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3930         return Error(Loc[0], "instruction is unpredictable when following a"
3931                      " movprfx and destination also used as non-destructive"
3932                      " source");
3933     }
3934 
3935     auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3936     if (Prefix.isPredicated()) {
3937       int PgIdx = -1;
3938 
3939       // Find the instructions general predicate.
3940       for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3941         if (Inst.getOperand(i).isReg() &&
3942             PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3943           PgIdx = i;
3944           break;
3945         }
3946 
3947       // Instruction must be predicated if the movprfx is predicated.
3948       if (PgIdx == -1 ||
3949           (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3950         return Error(IDLoc, "instruction is unpredictable when following a"
3951                      " predicated movprfx, suggest using unpredicated movprfx");
3952 
3953       // Instruction must use same general predicate as the movprfx.
3954       if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3955         return Error(IDLoc, "instruction is unpredictable when following a"
3956                      " predicated movprfx using a different general predicate");
3957 
3958       // Instruction element type must match the movprfx.
3959       if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3960         return Error(IDLoc, "instruction is unpredictable when following a"
3961                      " predicated movprfx with a different element size");
3962     }
3963   }
3964 
3965   // Check for indexed addressing modes w/ the base register being the
3966   // same as a destination/source register or pair load where
3967   // the Rt == Rt2. All of those are undefined behaviour.
3968   switch (Inst.getOpcode()) {
3969   case AArch64::LDPSWpre:
3970   case AArch64::LDPWpost:
3971   case AArch64::LDPWpre:
3972   case AArch64::LDPXpost:
3973   case AArch64::LDPXpre: {
3974     unsigned Rt = Inst.getOperand(1).getReg();
3975     unsigned Rt2 = Inst.getOperand(2).getReg();
3976     unsigned Rn = Inst.getOperand(3).getReg();
3977     if (RI->isSubRegisterEq(Rn, Rt))
3978       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3979                            "is also a destination");
3980     if (RI->isSubRegisterEq(Rn, Rt2))
3981       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3982                            "is also a destination");
3983     LLVM_FALLTHROUGH;
3984   }
3985   case AArch64::LDPDi:
3986   case AArch64::LDPQi:
3987   case AArch64::LDPSi:
3988   case AArch64::LDPSWi:
3989   case AArch64::LDPWi:
3990   case AArch64::LDPXi: {
3991     unsigned Rt = Inst.getOperand(0).getReg();
3992     unsigned Rt2 = Inst.getOperand(1).getReg();
3993     if (Rt == Rt2)
3994       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3995     break;
3996   }
3997   case AArch64::LDPDpost:
3998   case AArch64::LDPDpre:
3999   case AArch64::LDPQpost:
4000   case AArch64::LDPQpre:
4001   case AArch64::LDPSpost:
4002   case AArch64::LDPSpre:
4003   case AArch64::LDPSWpost: {
4004     unsigned Rt = Inst.getOperand(1).getReg();
4005     unsigned Rt2 = Inst.getOperand(2).getReg();
4006     if (Rt == Rt2)
4007       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4008     break;
4009   }
4010   case AArch64::STPDpost:
4011   case AArch64::STPDpre:
4012   case AArch64::STPQpost:
4013   case AArch64::STPQpre:
4014   case AArch64::STPSpost:
4015   case AArch64::STPSpre:
4016   case AArch64::STPWpost:
4017   case AArch64::STPWpre:
4018   case AArch64::STPXpost:
4019   case AArch64::STPXpre: {
4020     unsigned Rt = Inst.getOperand(1).getReg();
4021     unsigned Rt2 = Inst.getOperand(2).getReg();
4022     unsigned Rn = Inst.getOperand(3).getReg();
4023     if (RI->isSubRegisterEq(Rn, Rt))
4024       return Error(Loc[0], "unpredictable STP instruction, writeback base "
4025                            "is also a source");
4026     if (RI->isSubRegisterEq(Rn, Rt2))
4027       return Error(Loc[1], "unpredictable STP instruction, writeback base "
4028                            "is also a source");
4029     break;
4030   }
4031   case AArch64::LDRBBpre:
4032   case AArch64::LDRBpre:
4033   case AArch64::LDRHHpre:
4034   case AArch64::LDRHpre:
4035   case AArch64::LDRSBWpre:
4036   case AArch64::LDRSBXpre:
4037   case AArch64::LDRSHWpre:
4038   case AArch64::LDRSHXpre:
4039   case AArch64::LDRSWpre:
4040   case AArch64::LDRWpre:
4041   case AArch64::LDRXpre:
4042   case AArch64::LDRBBpost:
4043   case AArch64::LDRBpost:
4044   case AArch64::LDRHHpost:
4045   case AArch64::LDRHpost:
4046   case AArch64::LDRSBWpost:
4047   case AArch64::LDRSBXpost:
4048   case AArch64::LDRSHWpost:
4049   case AArch64::LDRSHXpost:
4050   case AArch64::LDRSWpost:
4051   case AArch64::LDRWpost:
4052   case AArch64::LDRXpost: {
4053     unsigned Rt = Inst.getOperand(1).getReg();
4054     unsigned Rn = Inst.getOperand(2).getReg();
4055     if (RI->isSubRegisterEq(Rn, Rt))
4056       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4057                            "is also a source");
4058     break;
4059   }
4060   case AArch64::STRBBpost:
4061   case AArch64::STRBpost:
4062   case AArch64::STRHHpost:
4063   case AArch64::STRHpost:
4064   case AArch64::STRWpost:
4065   case AArch64::STRXpost:
4066   case AArch64::STRBBpre:
4067   case AArch64::STRBpre:
4068   case AArch64::STRHHpre:
4069   case AArch64::STRHpre:
4070   case AArch64::STRWpre:
4071   case AArch64::STRXpre: {
4072     unsigned Rt = Inst.getOperand(1).getReg();
4073     unsigned Rn = Inst.getOperand(2).getReg();
4074     if (RI->isSubRegisterEq(Rn, Rt))
4075       return Error(Loc[0], "unpredictable STR instruction, writeback base "
4076                            "is also a source");
4077     break;
4078   }
4079   case AArch64::STXRB:
4080   case AArch64::STXRH:
4081   case AArch64::STXRW:
4082   case AArch64::STXRX:
4083   case AArch64::STLXRB:
4084   case AArch64::STLXRH:
4085   case AArch64::STLXRW:
4086   case AArch64::STLXRX: {
4087     unsigned Rs = Inst.getOperand(0).getReg();
4088     unsigned Rt = Inst.getOperand(1).getReg();
4089     unsigned Rn = Inst.getOperand(2).getReg();
4090     if (RI->isSubRegisterEq(Rt, Rs) ||
4091         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4092       return Error(Loc[0],
4093                    "unpredictable STXR instruction, status is also a source");
4094     break;
4095   }
4096   case AArch64::STXPW:
4097   case AArch64::STXPX:
4098   case AArch64::STLXPW:
4099   case AArch64::STLXPX: {
4100     unsigned Rs = Inst.getOperand(0).getReg();
4101     unsigned Rt1 = Inst.getOperand(1).getReg();
4102     unsigned Rt2 = Inst.getOperand(2).getReg();
4103     unsigned Rn = Inst.getOperand(3).getReg();
4104     if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4105         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4106       return Error(Loc[0],
4107                    "unpredictable STXP instruction, status is also a source");
4108     break;
4109   }
4110   }
4111 
4112 
4113   // Now check immediate ranges. Separate from the above as there is overlap
4114   // in the instructions being checked and this keeps the nested conditionals
4115   // to a minimum.
4116   switch (Inst.getOpcode()) {
4117   case AArch64::ADDSWri:
4118   case AArch64::ADDSXri:
4119   case AArch64::ADDWri:
4120   case AArch64::ADDXri:
4121   case AArch64::SUBSWri:
4122   case AArch64::SUBSXri:
4123   case AArch64::SUBWri:
4124   case AArch64::SUBXri: {
4125     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4126     // some slight duplication here.
4127     if (Inst.getOperand(2).isExpr()) {
4128       const MCExpr *Expr = Inst.getOperand(2).getExpr();
4129       AArch64MCExpr::VariantKind ELFRefKind;
4130       MCSymbolRefExpr::VariantKind DarwinRefKind;
4131       int64_t Addend;
4132       if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4133 
4134         // Only allow these with ADDXri.
4135         if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4136              DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4137             Inst.getOpcode() == AArch64::ADDXri)
4138           return false;
4139 
4140         // Only allow these with ADDXri/ADDWri
4141         if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4142              ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4143              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4144              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4145              ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4146              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4147              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4148              ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4149              ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4150              ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4151             (Inst.getOpcode() == AArch64::ADDXri ||
4152              Inst.getOpcode() == AArch64::ADDWri))
4153           return false;
4154 
4155         // Don't allow symbol refs in the immediate field otherwise
4156         // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4157         // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4158         // 'cmp w0, 'borked')
4159         return Error(Loc.back(), "invalid immediate expression");
4160       }
4161       // We don't validate more complex expressions here
4162     }
4163     return false;
4164   }
4165   default:
4166     return false;
4167   }
4168 }
4169 
4170 static std::string AArch64MnemonicSpellCheck(StringRef S,
4171                                              const FeatureBitset &FBS,
4172                                              unsigned VariantID = 0);
4173 
// Translate an error code from the table-generated instruction matcher into a
// human-readable diagnostic reported at \p Loc.  \p ErrorInfo is only
// consulted for Match_InvalidTiedOperand, where it indexes the offending
// operand in \p Operands; Match_MnemonicFail uses Operands[0] (the mnemonic
// token) to compute a spelling suggestion.  Always returns true, following
// the Error() convention.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // Choose the wording based on how the tied operand must relate to the
    // destination register: its 64-bit form, its 32-bit form, or an exact
    // match.
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-offset immediates: each message spells out the required
  // multiple (the access size) and the inclusive signed/unsigned range.
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  // Register-offset addressing: which extend mnemonics are legal and the
  // permitted shift amount for the given access width.
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate-operand ranges.
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // SVE immediates that may optionally be encoded with an 8-bit shift.
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  // Vector/SVE lane-index ranges.
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Unknown mnemonic: append a "did you mean ...?" suggestion computed
    // from the mnemonic token and the currently-enabled feature set.
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  // Scalar GPR operands with a required (or forbidden) shift amount.
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  // SVE scatter/gather vector-offset operands: the expected element width
  // plus the legal extend/shift modifier for each access size.
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  // Restricted SVE vector registers (encodings with a 3- or 4-bit Zd/Zm
  // field, so only the low-numbered registers are legal).
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPredicate3bBReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
  case Match_InvalidSVEPredicate3bHReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
  case Match_InvalidSVEPredicate3bSReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
4493 
4494 static const char *getSubtargetFeatureName(uint64_t Val);
4495 
4496 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4497                                                OperandVector &Operands,
4498                                                MCStreamer &Out,
4499                                                uint64_t &ErrorInfo,
4500                                                bool MatchingInlineAsm) {
4501   assert(!Operands.empty() && "Unexpect empty operand list!");
4502   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4503   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4504 
4505   StringRef Tok = Op.getToken();
4506   unsigned NumOperands = Operands.size();
4507 
4508   if (NumOperands == 4 && Tok == "lsl") {
4509     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4510     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4511     if (Op2.isScalarReg() && Op3.isImm()) {
4512       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4513       if (Op3CE) {
4514         uint64_t Op3Val = Op3CE->getValue();
4515         uint64_t NewOp3Val = 0;
4516         uint64_t NewOp4Val = 0;
4517         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4518                 Op2.getReg())) {
4519           NewOp3Val = (32 - Op3Val) & 0x1f;
4520           NewOp4Val = 31 - Op3Val;
4521         } else {
4522           NewOp3Val = (64 - Op3Val) & 0x3f;
4523           NewOp4Val = 63 - Op3Val;
4524         }
4525 
4526         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4527         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4528 
4529         Operands[0] = AArch64Operand::CreateToken(
4530             "ubfm", false, Op.getStartLoc(), getContext());
4531         Operands.push_back(AArch64Operand::CreateImm(
4532             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4533         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4534                                                 Op3.getEndLoc(), getContext());
4535       }
4536     }
4537   } else if (NumOperands == 4 && Tok == "bfc") {
4538     // FIXME: Horrible hack to handle BFC->BFM alias.
4539     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4540     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4541     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4542 
4543     if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4544       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4545       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4546 
4547       if (LSBCE && WidthCE) {
4548         uint64_t LSB = LSBCE->getValue();
4549         uint64_t Width = WidthCE->getValue();
4550 
4551         uint64_t RegWidth = 0;
4552         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4553                 Op1.getReg()))
4554           RegWidth = 64;
4555         else
4556           RegWidth = 32;
4557 
4558         if (LSB >= RegWidth)
4559           return Error(LSBOp.getStartLoc(),
4560                        "expected integer in range [0, 31]");
4561         if (Width < 1 || Width > RegWidth)
4562           return Error(WidthOp.getStartLoc(),
4563                        "expected integer in range [1, 32]");
4564 
4565         uint64_t ImmR = 0;
4566         if (RegWidth == 32)
4567           ImmR = (32 - LSB) & 0x1f;
4568         else
4569           ImmR = (64 - LSB) & 0x3f;
4570 
4571         uint64_t ImmS = Width - 1;
4572 
4573         if (ImmR != 0 && ImmS >= ImmR)
4574           return Error(WidthOp.getStartLoc(),
4575                        "requested insert overflows register");
4576 
4577         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4578         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
4579         Operands[0] = AArch64Operand::CreateToken(
4580               "bfm", false, Op.getStartLoc(), getContext());
4581         Operands[2] = AArch64Operand::CreateReg(
4582             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4583             SMLoc(), SMLoc(), getContext());
4584         Operands[3] = AArch64Operand::CreateImm(
4585             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4586         Operands.emplace_back(
4587             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4588                                       WidthOp.getEndLoc(), getContext()));
4589       }
4590     }
4591   } else if (NumOperands == 5) {
4592     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4593     // UBFIZ -> UBFM aliases.
4594     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4595       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4596       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4597       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4598 
4599       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4600         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4601         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4602 
4603         if (Op3CE && Op4CE) {
4604           uint64_t Op3Val = Op3CE->getValue();
4605           uint64_t Op4Val = Op4CE->getValue();
4606 
4607           uint64_t RegWidth = 0;
4608           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4609                   Op1.getReg()))
4610             RegWidth = 64;
4611           else
4612             RegWidth = 32;
4613 
4614           if (Op3Val >= RegWidth)
4615             return Error(Op3.getStartLoc(),
4616                          "expected integer in range [0, 31]");
4617           if (Op4Val < 1 || Op4Val > RegWidth)
4618             return Error(Op4.getStartLoc(),
4619                          "expected integer in range [1, 32]");
4620 
4621           uint64_t NewOp3Val = 0;
4622           if (RegWidth == 32)
4623             NewOp3Val = (32 - Op3Val) & 0x1f;
4624           else
4625             NewOp3Val = (64 - Op3Val) & 0x3f;
4626 
4627           uint64_t NewOp4Val = Op4Val - 1;
4628 
4629           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4630             return Error(Op4.getStartLoc(),
4631                          "requested insert overflows register");
4632 
4633           const MCExpr *NewOp3 =
4634               MCConstantExpr::create(NewOp3Val, getContext());
4635           const MCExpr *NewOp4 =
4636               MCConstantExpr::create(NewOp4Val, getContext());
4637           Operands[3] = AArch64Operand::CreateImm(
4638               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4639           Operands[4] = AArch64Operand::CreateImm(
4640               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4641           if (Tok == "bfi")
4642             Operands[0] = AArch64Operand::CreateToken(
4643                 "bfm", false, Op.getStartLoc(), getContext());
4644           else if (Tok == "sbfiz")
4645             Operands[0] = AArch64Operand::CreateToken(
4646                 "sbfm", false, Op.getStartLoc(), getContext());
4647           else if (Tok == "ubfiz")
4648             Operands[0] = AArch64Operand::CreateToken(
4649                 "ubfm", false, Op.getStartLoc(), getContext());
4650           else
4651             llvm_unreachable("No valid mnemonic for alias?");
4652         }
4653       }
4654 
4655       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4656       // UBFX -> UBFM aliases.
4657     } else if (NumOperands == 5 &&
4658                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4659       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4660       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4661       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4662 
4663       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4664         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4665         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4666 
4667         if (Op3CE && Op4CE) {
4668           uint64_t Op3Val = Op3CE->getValue();
4669           uint64_t Op4Val = Op4CE->getValue();
4670 
4671           uint64_t RegWidth = 0;
4672           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4673                   Op1.getReg()))
4674             RegWidth = 64;
4675           else
4676             RegWidth = 32;
4677 
4678           if (Op3Val >= RegWidth)
4679             return Error(Op3.getStartLoc(),
4680                          "expected integer in range [0, 31]");
4681           if (Op4Val < 1 || Op4Val > RegWidth)
4682             return Error(Op4.getStartLoc(),
4683                          "expected integer in range [1, 32]");
4684 
4685           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4686 
4687           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4688             return Error(Op4.getStartLoc(),
4689                          "requested extract overflows register");
4690 
4691           const MCExpr *NewOp4 =
4692               MCConstantExpr::create(NewOp4Val, getContext());
4693           Operands[4] = AArch64Operand::CreateImm(
4694               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4695           if (Tok == "bfxil")
4696             Operands[0] = AArch64Operand::CreateToken(
4697                 "bfm", false, Op.getStartLoc(), getContext());
4698           else if (Tok == "sbfx")
4699             Operands[0] = AArch64Operand::CreateToken(
4700                 "sbfm", false, Op.getStartLoc(), getContext());
4701           else if (Tok == "ubfx")
4702             Operands[0] = AArch64Operand::CreateToken(
4703                 "ubfm", false, Op.getStartLoc(), getContext());
4704           else
4705             llvm_unreachable("No valid mnemonic for alias?");
4706         }
4707       }
4708     }
4709   }
4710 
4711   // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4712   // instruction for FP registers correctly in some rare circumstances. Convert
4713   // it to a safe instruction and warn (because silently changing someone's
4714   // assembly is rude).
4715   if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4716       NumOperands == 4 && Tok == "movi") {
4717     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4718     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4719     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4720     if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4721         (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4722       StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4723       if (Suffix.lower() == ".2d" &&
4724           cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4725         Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4726                 " correctly on this CPU, converting to equivalent movi.16b");
4727         // Switch the suffix to .16b.
4728         unsigned Idx = Op1.isToken() ? 1 : 2;
4729         Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4730                                                   getContext());
4731       }
4732     }
4733   }
4734 
4735   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4736   //        InstAlias can't quite handle this since the reg classes aren't
4737   //        subclasses.
4738   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4739     // The source register can be Wn here, but the matcher expects a
4740     // GPR64. Twiddle it here if necessary.
4741     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4742     if (Op.isScalarReg()) {
4743       unsigned Reg = getXRegFromWReg(Op.getReg());
4744       Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4745                                               Op.getStartLoc(), Op.getEndLoc(),
4746                                               getContext());
4747     }
4748   }
4749   // FIXME: Likewise for sxt[bh] with a Xd dst operand
4750   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4751     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4752     if (Op.isScalarReg() &&
4753         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4754             Op.getReg())) {
4755       // The source register can be Wn here, but the matcher expects a
4756       // GPR64. Twiddle it here if necessary.
4757       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4758       if (Op.isScalarReg()) {
4759         unsigned Reg = getXRegFromWReg(Op.getReg());
4760         Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4761                                                 Op.getStartLoc(),
4762                                                 Op.getEndLoc(), getContext());
4763       }
4764     }
4765   }
4766   // FIXME: Likewise for uxt[bh] with a Xd dst operand
4767   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4768     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4769     if (Op.isScalarReg() &&
4770         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4771             Op.getReg())) {
4772       // The source register can be Wn here, but the matcher expects a
4773       // GPR32. Twiddle it here if necessary.
4774       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4775       if (Op.isScalarReg()) {
4776         unsigned Reg = getWRegFromXReg(Op.getReg());
4777         Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4778                                                 Op.getStartLoc(),
4779                                                 Op.getEndLoc(), getContext());
4780       }
4781     }
4782   }
4783 
4784   MCInst Inst;
4785   FeatureBitset MissingFeatures;
4786   // First try to match against the secondary set of tables containing the
4787   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4788   unsigned MatchResult =
4789       MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4790                            MatchingInlineAsm, 1);
4791 
4792   // If that fails, try against the alternate table containing long-form NEON:
4793   // "fadd v0.2s, v1.2s, v2.2s"
4794   if (MatchResult != Match_Success) {
4795     // But first, save the short-form match result: we can use it in case the
4796     // long-form match also fails.
4797     auto ShortFormNEONErrorInfo = ErrorInfo;
4798     auto ShortFormNEONMatchResult = MatchResult;
4799     auto ShortFormNEONMissingFeatures = MissingFeatures;
4800 
4801     MatchResult =
4802         MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
4803                              MatchingInlineAsm, 0);
4804 
4805     // Now, both matches failed, and the long-form match failed on the mnemonic
4806     // suffix token operand.  The short-form match failure is probably more
4807     // relevant: use it instead.
4808     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4809         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4810         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4811       MatchResult = ShortFormNEONMatchResult;
4812       ErrorInfo = ShortFormNEONErrorInfo;
4813       MissingFeatures = ShortFormNEONMissingFeatures;
4814     }
4815   }
4816 
4817   switch (MatchResult) {
4818   case Match_Success: {
4819     // Perform range checking and other semantic validations
4820     SmallVector<SMLoc, 8> OperandLocs;
4821     NumOperands = Operands.size();
4822     for (unsigned i = 1; i < NumOperands; ++i)
4823       OperandLocs.push_back(Operands[i]->getStartLoc());
4824     if (validateInstruction(Inst, IDLoc, OperandLocs))
4825       return true;
4826 
4827     Inst.setLoc(IDLoc);
4828     Out.EmitInstruction(Inst, getSTI());
4829     return false;
4830   }
4831   case Match_MissingFeature: {
4832     assert(MissingFeatures.any() && "Unknown missing feature!");
4833     // Special case the error message for the very common case where only
4834     // a single subtarget feature is missing (neon, e.g.).
4835     std::string Msg = "instruction requires:";
4836     for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4837       if (MissingFeatures[i]) {
4838         Msg += " ";
4839         Msg += getSubtargetFeatureName(i);
4840       }
4841     }
4842     return Error(IDLoc, Msg);
4843   }
4844   case Match_MnemonicFail:
4845     return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4846   case Match_InvalidOperand: {
4847     SMLoc ErrorLoc = IDLoc;
4848 
4849     if (ErrorInfo != ~0ULL) {
4850       if (ErrorInfo >= Operands.size())
4851         return Error(IDLoc, "too few operands for instruction",
4852                      SMRange(IDLoc, getTok().getLoc()));
4853 
4854       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4855       if (ErrorLoc == SMLoc())
4856         ErrorLoc = IDLoc;
4857     }
4858     // If the match failed on a suffix token operand, tweak the diagnostic
4859     // accordingly.
4860     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4861         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4862       MatchResult = Match_InvalidSuffix;
4863 
4864     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4865   }
4866   case Match_InvalidTiedOperand:
4867   case Match_InvalidMemoryIndexed1:
4868   case Match_InvalidMemoryIndexed2:
4869   case Match_InvalidMemoryIndexed4:
4870   case Match_InvalidMemoryIndexed8:
4871   case Match_InvalidMemoryIndexed16:
4872   case Match_InvalidCondCode:
4873   case Match_AddSubRegExtendSmall:
4874   case Match_AddSubRegExtendLarge:
4875   case Match_AddSubSecondSource:
4876   case Match_LogicalSecondSource:
4877   case Match_AddSubRegShift32:
4878   case Match_AddSubRegShift64:
4879   case Match_InvalidMovImm32Shift:
4880   case Match_InvalidMovImm64Shift:
4881   case Match_InvalidFPImm:
4882   case Match_InvalidMemoryWExtend8:
4883   case Match_InvalidMemoryWExtend16:
4884   case Match_InvalidMemoryWExtend32:
4885   case Match_InvalidMemoryWExtend64:
4886   case Match_InvalidMemoryWExtend128:
4887   case Match_InvalidMemoryXExtend8:
4888   case Match_InvalidMemoryXExtend16:
4889   case Match_InvalidMemoryXExtend32:
4890   case Match_InvalidMemoryXExtend64:
4891   case Match_InvalidMemoryXExtend128:
4892   case Match_InvalidMemoryIndexed1SImm4:
4893   case Match_InvalidMemoryIndexed2SImm4:
4894   case Match_InvalidMemoryIndexed3SImm4:
4895   case Match_InvalidMemoryIndexed4SImm4:
4896   case Match_InvalidMemoryIndexed1SImm6:
4897   case Match_InvalidMemoryIndexed16SImm4:
4898   case Match_InvalidMemoryIndexed4SImm7:
4899   case Match_InvalidMemoryIndexed8SImm7:
4900   case Match_InvalidMemoryIndexed16SImm7:
4901   case Match_InvalidMemoryIndexed8UImm5:
4902   case Match_InvalidMemoryIndexed4UImm5:
4903   case Match_InvalidMemoryIndexed2UImm5:
4904   case Match_InvalidMemoryIndexed1UImm6:
4905   case Match_InvalidMemoryIndexed2UImm6:
4906   case Match_InvalidMemoryIndexed4UImm6:
4907   case Match_InvalidMemoryIndexed8UImm6:
4908   case Match_InvalidMemoryIndexed16UImm6:
4909   case Match_InvalidMemoryIndexedSImm6:
4910   case Match_InvalidMemoryIndexedSImm5:
4911   case Match_InvalidMemoryIndexedSImm8:
4912   case Match_InvalidMemoryIndexedSImm9:
4913   case Match_InvalidMemoryIndexed16SImm9:
4914   case Match_InvalidMemoryIndexed8SImm10:
4915   case Match_InvalidImm0_1:
4916   case Match_InvalidImm0_7:
4917   case Match_InvalidImm0_15:
4918   case Match_InvalidImm0_31:
4919   case Match_InvalidImm0_63:
4920   case Match_InvalidImm0_127:
4921   case Match_InvalidImm0_255:
4922   case Match_InvalidImm0_65535:
4923   case Match_InvalidImm1_8:
4924   case Match_InvalidImm1_16:
4925   case Match_InvalidImm1_32:
4926   case Match_InvalidImm1_64:
4927   case Match_InvalidSVEAddSubImm8:
4928   case Match_InvalidSVEAddSubImm16:
4929   case Match_InvalidSVEAddSubImm32:
4930   case Match_InvalidSVEAddSubImm64:
4931   case Match_InvalidSVECpyImm8:
4932   case Match_InvalidSVECpyImm16:
4933   case Match_InvalidSVECpyImm32:
4934   case Match_InvalidSVECpyImm64:
4935   case Match_InvalidIndexRange1_1:
4936   case Match_InvalidIndexRange0_15:
4937   case Match_InvalidIndexRange0_7:
4938   case Match_InvalidIndexRange0_3:
4939   case Match_InvalidIndexRange0_1:
4940   case Match_InvalidSVEIndexRange0_63:
4941   case Match_InvalidSVEIndexRange0_31:
4942   case Match_InvalidSVEIndexRange0_15:
4943   case Match_InvalidSVEIndexRange0_7:
4944   case Match_InvalidSVEIndexRange0_3:
4945   case Match_InvalidLabel:
4946   case Match_InvalidComplexRotationEven:
4947   case Match_InvalidComplexRotationOdd:
4948   case Match_InvalidGPR64shifted8:
4949   case Match_InvalidGPR64shifted16:
4950   case Match_InvalidGPR64shifted32:
4951   case Match_InvalidGPR64shifted64:
4952   case Match_InvalidGPR64NoXZRshifted8:
4953   case Match_InvalidGPR64NoXZRshifted16:
4954   case Match_InvalidGPR64NoXZRshifted32:
4955   case Match_InvalidGPR64NoXZRshifted64:
4956   case Match_InvalidZPR32UXTW8:
4957   case Match_InvalidZPR32UXTW16:
4958   case Match_InvalidZPR32UXTW32:
4959   case Match_InvalidZPR32UXTW64:
4960   case Match_InvalidZPR32SXTW8:
4961   case Match_InvalidZPR32SXTW16:
4962   case Match_InvalidZPR32SXTW32:
4963   case Match_InvalidZPR32SXTW64:
4964   case Match_InvalidZPR64UXTW8:
4965   case Match_InvalidZPR64SXTW8:
4966   case Match_InvalidZPR64UXTW16:
4967   case Match_InvalidZPR64SXTW16:
4968   case Match_InvalidZPR64UXTW32:
4969   case Match_InvalidZPR64SXTW32:
4970   case Match_InvalidZPR64UXTW64:
4971   case Match_InvalidZPR64SXTW64:
4972   case Match_InvalidZPR32LSL8:
4973   case Match_InvalidZPR32LSL16:
4974   case Match_InvalidZPR32LSL32:
4975   case Match_InvalidZPR32LSL64:
4976   case Match_InvalidZPR64LSL8:
4977   case Match_InvalidZPR64LSL16:
4978   case Match_InvalidZPR64LSL32:
4979   case Match_InvalidZPR64LSL64:
4980   case Match_InvalidZPR0:
4981   case Match_InvalidZPR8:
4982   case Match_InvalidZPR16:
4983   case Match_InvalidZPR32:
4984   case Match_InvalidZPR64:
4985   case Match_InvalidZPR128:
4986   case Match_InvalidZPR_3b8:
4987   case Match_InvalidZPR_3b16:
4988   case Match_InvalidZPR_3b32:
4989   case Match_InvalidZPR_4b16:
4990   case Match_InvalidZPR_4b32:
4991   case Match_InvalidZPR_4b64:
4992   case Match_InvalidSVEPredicateAnyReg:
4993   case Match_InvalidSVEPattern:
4994   case Match_InvalidSVEPredicateBReg:
4995   case Match_InvalidSVEPredicateHReg:
4996   case Match_InvalidSVEPredicateSReg:
4997   case Match_InvalidSVEPredicateDReg:
4998   case Match_InvalidSVEPredicate3bAnyReg:
4999   case Match_InvalidSVEPredicate3bBReg:
5000   case Match_InvalidSVEPredicate3bHReg:
5001   case Match_InvalidSVEPredicate3bSReg:
5002   case Match_InvalidSVEPredicate3bDReg:
5003   case Match_InvalidSVEExactFPImmOperandHalfOne:
5004   case Match_InvalidSVEExactFPImmOperandHalfTwo:
5005   case Match_InvalidSVEExactFPImmOperandZeroOne:
5006   case Match_MSR:
5007   case Match_MRS: {
5008     if (ErrorInfo >= Operands.size())
5009       return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5010     // Any time we get here, there's nothing fancy to do. Just get the
5011     // operand SMLoc and display the diagnostic.
5012     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5013     if (ErrorLoc == SMLoc())
5014       ErrorLoc = IDLoc;
5015     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5016   }
5017   }
5018 
5019   llvm_unreachable("Implement any new match types added!");
5020 }
5021 
5022 /// ParseDirective parses the arm specific directives
5023 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
5024   const MCObjectFileInfo::Environment Format =
5025     getContext().getObjectFileInfo()->getObjectFileType();
5026   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
5027 
5028   StringRef IDVal = DirectiveID.getIdentifier();
5029   SMLoc Loc = DirectiveID.getLoc();
5030   if (IDVal == ".arch")
5031     parseDirectiveArch(Loc);
5032   else if (IDVal == ".cpu")
5033     parseDirectiveCPU(Loc);
5034   else if (IDVal == ".tlsdesccall")
5035     parseDirectiveTLSDescCall(Loc);
5036   else if (IDVal == ".ltorg" || IDVal == ".pool")
5037     parseDirectiveLtorg(Loc);
5038   else if (IDVal == ".unreq")
5039     parseDirectiveUnreq(Loc);
5040   else if (IDVal == ".inst")
5041     parseDirectiveInst(Loc);
5042   else if (IDVal == ".cfi_negate_ra_state")
5043     parseDirectiveCFINegateRAState();
5044   else if (IDVal == ".cfi_b_key_frame")
5045     parseDirectiveCFIBKeyFrame();
5046   else if (IDVal == ".arch_extension")
5047     parseDirectiveArchExtension(Loc);
5048   else if (IsMachO) {
5049     if (IDVal == MCLOHDirectiveName())
5050       parseDirectiveLOH(IDVal, Loc);
5051     else
5052       return true;
5053   } else
5054     return true;
5055   return false;
5056 }
5057 
5058 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5059                             SmallVector<StringRef, 4> &RequestedExtensions) {
5060   const bool NoCrypto =
5061       (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5062                  "nocrypto") != std::end(RequestedExtensions));
5063   const bool Crypto =
5064       (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
5065                  "crypto") != std::end(RequestedExtensions));
5066 
5067   if (!NoCrypto && Crypto) {
5068     switch (ArchKind) {
5069     default:
5070       // Map 'generic' (and others) to sha2 and aes, because
5071       // that was the traditional meaning of crypto.
5072     case AArch64::ArchKind::ARMV8_1A:
5073     case AArch64::ArchKind::ARMV8_2A:
5074     case AArch64::ArchKind::ARMV8_3A:
5075       RequestedExtensions.push_back("sha2");
5076       RequestedExtensions.push_back("aes");
5077       break;
5078     case AArch64::ArchKind::ARMV8_4A:
5079     case AArch64::ArchKind::ARMV8_5A:
5080       RequestedExtensions.push_back("sm4");
5081       RequestedExtensions.push_back("sha3");
5082       RequestedExtensions.push_back("sha2");
5083       RequestedExtensions.push_back("aes");
5084       break;
5085     }
5086   } else if (NoCrypto) {
5087     switch (ArchKind) {
5088     default:
5089       // Map 'generic' (and others) to sha2 and aes, because
5090       // that was the traditional meaning of crypto.
5091     case AArch64::ArchKind::ARMV8_1A:
5092     case AArch64::ArchKind::ARMV8_2A:
5093     case AArch64::ArchKind::ARMV8_3A:
5094       RequestedExtensions.push_back("nosha2");
5095       RequestedExtensions.push_back("noaes");
5096       break;
5097     case AArch64::ArchKind::ARMV8_4A:
5098     case AArch64::ArchKind::ARMV8_5A:
5099       RequestedExtensions.push_back("nosm4");
5100       RequestedExtensions.push_back("nosha3");
5101       RequestedExtensions.push_back("nosha2");
5102       RequestedExtensions.push_back("noaes");
5103       break;
5104     }
5105   }
5106 }
5107 
5108 /// parseDirectiveArch
5109 ///   ::= .arch token
5110 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5111   SMLoc ArchLoc = getLoc();
5112 
5113   StringRef Arch, ExtensionString;
5114   std::tie(Arch, ExtensionString) =
5115       getParser().parseStringToEndOfStatement().trim().split('+');
5116 
5117   AArch64::ArchKind ID = AArch64::parseArch(Arch);
5118   if (ID == AArch64::ArchKind::INVALID)
5119     return Error(ArchLoc, "unknown arch name");
5120 
5121   if (parseToken(AsmToken::EndOfStatement))
5122     return true;
5123 
5124   // Get the architecture and extension features.
5125   std::vector<StringRef> AArch64Features;
5126   AArch64::getArchFeatures(ID, AArch64Features);
5127   AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
5128                                 AArch64Features);
5129 
5130   MCSubtargetInfo &STI = copySTI();
5131   std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
5132   STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
5133 
5134   SmallVector<StringRef, 4> RequestedExtensions;
5135   if (!ExtensionString.empty())
5136     ExtensionString.split(RequestedExtensions, '+');
5137 
5138   ExpandCryptoAEK(ID, RequestedExtensions);
5139 
5140   FeatureBitset Features = STI.getFeatureBits();
5141   for (auto Name : RequestedExtensions) {
5142     bool EnableFeature = true;
5143 
5144     if (Name.startswith_lower("no")) {
5145       EnableFeature = false;
5146       Name = Name.substr(2);
5147     }
5148 
5149     for (const auto &Extension : ExtensionMap) {
5150       if (Extension.Name != Name)
5151         continue;
5152 
5153       if (Extension.Features.none())
5154         report_fatal_error("unsupported architectural extension: " + Name);
5155 
5156       FeatureBitset ToggleFeatures = EnableFeature
5157                                          ? (~Features & Extension.Features)
5158                                          : ( Features & Extension.Features);
5159       FeatureBitset Features =
5160           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5161       setAvailableFeatures(Features);
5162       break;
5163     }
5164   }
5165   return false;
5166 }
5167 
5168 /// parseDirectiveArchExtension
5169 ///   ::= .arch_extension [no]feature
5170 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
5171   SMLoc ExtLoc = getLoc();
5172 
5173   StringRef Name = getParser().parseStringToEndOfStatement().trim();
5174 
5175   if (parseToken(AsmToken::EndOfStatement,
5176                  "unexpected token in '.arch_extension' directive"))
5177     return true;
5178 
5179   bool EnableFeature = true;
5180   if (Name.startswith_lower("no")) {
5181     EnableFeature = false;
5182     Name = Name.substr(2);
5183   }
5184 
5185   MCSubtargetInfo &STI = copySTI();
5186   FeatureBitset Features = STI.getFeatureBits();
5187   for (const auto &Extension : ExtensionMap) {
5188     if (Extension.Name != Name)
5189       continue;
5190 
5191     if (Extension.Features.none())
5192       return Error(ExtLoc, "unsupported architectural extension: " + Name);
5193 
5194     FeatureBitset ToggleFeatures = EnableFeature
5195                                        ? (~Features & Extension.Features)
5196                                        : (Features & Extension.Features);
5197     FeatureBitset Features =
5198         ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5199     setAvailableFeatures(Features);
5200     return false;
5201   }
5202 
5203   return Error(ExtLoc, "unknown architectural extension: " + Name);
5204 }
5205 
5206 static SMLoc incrementLoc(SMLoc L, int Offset) {
5207   return SMLoc::getFromPointer(L.getPointer() + Offset);
5208 }
5209 
5210 /// parseDirectiveCPU
5211 ///   ::= .cpu id
5212 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5213   SMLoc CurLoc = getLoc();
5214 
5215   StringRef CPU, ExtensionString;
5216   std::tie(CPU, ExtensionString) =
5217       getParser().parseStringToEndOfStatement().trim().split('+');
5218 
5219   if (parseToken(AsmToken::EndOfStatement))
5220     return true;
5221 
5222   SmallVector<StringRef, 4> RequestedExtensions;
5223   if (!ExtensionString.empty())
5224     ExtensionString.split(RequestedExtensions, '+');
5225 
5226   // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5227   // once that is tablegen'ed
5228   if (!getSTI().isCPUStringValid(CPU)) {
5229     Error(CurLoc, "unknown CPU name");
5230     return false;
5231   }
5232 
5233   MCSubtargetInfo &STI = copySTI();
5234   STI.setDefaultFeatures(CPU, "");
5235   CurLoc = incrementLoc(CurLoc, CPU.size());
5236 
5237   ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5238 
5239   FeatureBitset Features = STI.getFeatureBits();
5240   for (auto Name : RequestedExtensions) {
5241     // Advance source location past '+'.
5242     CurLoc = incrementLoc(CurLoc, 1);
5243 
5244     bool EnableFeature = true;
5245 
5246     if (Name.startswith_lower("no")) {
5247       EnableFeature = false;
5248       Name = Name.substr(2);
5249     }
5250 
5251     bool FoundExtension = false;
5252     for (const auto &Extension : ExtensionMap) {
5253       if (Extension.Name != Name)
5254         continue;
5255 
5256       if (Extension.Features.none())
5257         report_fatal_error("unsupported architectural extension: " + Name);
5258 
5259       FeatureBitset ToggleFeatures = EnableFeature
5260                                          ? (~Features & Extension.Features)
5261                                          : ( Features & Extension.Features);
5262       FeatureBitset Features =
5263           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5264       setAvailableFeatures(Features);
5265       FoundExtension = true;
5266 
5267       break;
5268     }
5269 
5270     if (!FoundExtension)
5271       Error(CurLoc, "unsupported architectural extension");
5272 
5273     CurLoc = incrementLoc(CurLoc, Name.size());
5274   }
5275   return false;
5276 }
5277 
5278 /// parseDirectiveInst
5279 ///  ::= .inst opcode [, ...]
5280 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5281   if (getLexer().is(AsmToken::EndOfStatement))
5282     return Error(Loc, "expected expression following '.inst' directive");
5283 
5284   auto parseOp = [&]() -> bool {
5285     SMLoc L = getLoc();
5286     const MCExpr *Expr;
5287     if (check(getParser().parseExpression(Expr), L, "expected expression"))
5288       return true;
5289     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5290     if (check(!Value, L, "expected constant expression"))
5291       return true;
5292     getTargetStreamer().emitInst(Value->getValue());
5293     return false;
5294   };
5295 
5296   if (parseMany(parseOp))
5297     return addErrorSuffix(" in '.inst' directive");
5298   return false;
5299 }
5300 
5301 // parseDirectiveTLSDescCall:
5302 //   ::= .tlsdesccall symbol
5303 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5304   StringRef Name;
5305   if (check(getParser().parseIdentifier(Name), L,
5306             "expected symbol after directive") ||
5307       parseToken(AsmToken::EndOfStatement))
5308     return true;
5309 
5310   MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5311   const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5312   Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5313 
5314   MCInst Inst;
5315   Inst.setOpcode(AArch64::TLSDESCCALL);
5316   Inst.addOperand(MCOperand::createExpr(Expr));
5317 
5318   getParser().getStreamer().EmitInstruction(Inst, getSTI());
5319   return false;
5320 }
5321 
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  MCLOHType Kind;
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    // NOTE(review): -1U widens to 4294967295 in this comparison, so
    // "Id <= -1U" holds for any Id representable in 32 bits and the guard is
    // effectively just !isValidMCLOHType(Id); an Id above 2^32-1 skips
    // validation entirely and is truncated by the cast below -- confirm
    // whether that is intended.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Parse exactly NbArgs comma-separated label arguments, creating (or
  // reusing) a symbol for each.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));

    // No comma after the final argument.
    if (Idx + 1 == NbArgs)
      break;
    if (parseToken(AsmToken::Comma,
                   "unexpected token in '" + Twine(IDVal) + "' directive"))
      return true;
  }
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '" + Twine(IDVal) + "' directive"))
    return true;

  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
  return false;
}
5372 
5373 /// parseDirectiveLtorg
5374 ///  ::= .ltorg | .pool
5375 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5376   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5377     return true;
5378   getTargetStreamer().emitCurrentConstantPool();
5379   return false;
5380 }
5381 
5382 /// parseDirectiveReq
5383 ///  ::= name .req registername
5384 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5385   MCAsmParser &Parser = getParser();
5386   Parser.Lex(); // Eat the '.req' token.
5387   SMLoc SRegLoc = getLoc();
5388   RegKind RegisterKind = RegKind::Scalar;
5389   unsigned RegNum;
5390   OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5391 
5392   if (ParseRes != MatchOperand_Success) {
5393     StringRef Kind;
5394     RegisterKind = RegKind::NeonVector;
5395     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5396 
5397     if (ParseRes == MatchOperand_ParseFail)
5398       return true;
5399 
5400     if (ParseRes == MatchOperand_Success && !Kind.empty())
5401       return Error(SRegLoc, "vector register without type specifier expected");
5402   }
5403 
5404   if (ParseRes != MatchOperand_Success) {
5405     StringRef Kind;
5406     RegisterKind = RegKind::SVEDataVector;
5407     ParseRes =
5408         tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5409 
5410     if (ParseRes == MatchOperand_ParseFail)
5411       return true;
5412 
5413     if (ParseRes == MatchOperand_Success && !Kind.empty())
5414       return Error(SRegLoc,
5415                    "sve vector register without type specifier expected");
5416   }
5417 
5418   if (ParseRes != MatchOperand_Success) {
5419     StringRef Kind;
5420     RegisterKind = RegKind::SVEPredicateVector;
5421     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5422 
5423     if (ParseRes == MatchOperand_ParseFail)
5424       return true;
5425 
5426     if (ParseRes == MatchOperand_Success && !Kind.empty())
5427       return Error(SRegLoc,
5428                    "sve predicate register without type specifier expected");
5429   }
5430 
5431   if (ParseRes != MatchOperand_Success)
5432     return Error(SRegLoc, "register name or alias expected");
5433 
5434   // Shouldn't be anything else.
5435   if (parseToken(AsmToken::EndOfStatement,
5436                  "unexpected input in .req directive"))
5437     return true;
5438 
5439   auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5440   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5441     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5442 
5443   return false;
5444 }
5445 
5446 /// parseDirectiveUneq
5447 ///  ::= .unreq registername
5448 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5449   MCAsmParser &Parser = getParser();
5450   if (getTok().isNot(AsmToken::Identifier))
5451     return TokError("unexpected input in .unreq directive.");
5452   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5453   Parser.Lex(); // Eat the identifier.
5454   if (parseToken(AsmToken::EndOfStatement))
5455     return addErrorSuffix("in '.unreq' directive");
5456   return false;
5457 }
5458 
5459 bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
5460   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5461     return true;
5462   getStreamer().EmitCFINegateRAState();
5463   return false;
5464 }
5465 
5466 /// parseDirectiveCFIBKeyFrame
5467 /// ::= .cfi_b_key
5468 bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
5469   if (parseToken(AsmToken::EndOfStatement,
5470                  "unexpected token in '.cfi_b_key_frame'"))
5471     return true;
5472   getStreamer().EmitCFIBKeyFrame();
5473   return false;
5474 }
5475 
5476 bool
5477 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5478                                     AArch64MCExpr::VariantKind &ELFRefKind,
5479                                     MCSymbolRefExpr::VariantKind &DarwinRefKind,
5480                                     int64_t &Addend) {
5481   ELFRefKind = AArch64MCExpr::VK_INVALID;
5482   DarwinRefKind = MCSymbolRefExpr::VK_None;
5483   Addend = 0;
5484 
5485   if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5486     ELFRefKind = AE->getKind();
5487     Expr = AE->getSubExpr();
5488   }
5489 
5490   const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5491   if (SE) {
5492     // It's a simple symbol reference with no addend.
5493     DarwinRefKind = SE->getKind();
5494     return true;
5495   }
5496 
5497   // Check that it looks like a symbol + an addend
5498   MCValue Res;
5499   bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
5500   if (!Relocatable || Res.getSymB())
5501     return false;
5502 
5503   // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
5504   // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
5505   if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
5506     return false;
5507 
5508   if (Res.getSymA())
5509     DarwinRefKind = Res.getSymA()->getKind();
5510   Addend = Res.getConstant();
5511 
5512   // It's some symbol reference + a constant addend, but really
5513   // shouldn't use both Darwin and ELF syntax.
5514   return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5515          DarwinRefKind == MCSymbolRefExpr::VK_None;
5516 }
5517 
5518 /// Force static initialization.
5519 extern "C" void LLVMInitializeAArch64AsmParser() {
5520   RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5521   RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5522   RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5523   RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
5524   RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
5525 }
5526 
5527 #define GET_REGISTER_MATCHER
5528 #define GET_SUBTARGET_FEATURE_NAME
5529 #define GET_MATCHER_IMPLEMENTATION
5530 #define GET_MNEMONIC_SPELL_CHECKER
5531 #include "AArch64GenAsmMatcher.inc"
5532 
5533 // Define this matcher function after the auto-generated include so we
5534 // have the match class enum definitions.
5535 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
5536                                                       unsigned Kind) {
5537   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
5538   // If the kind is a token for a literal immediate, check if our asm
5539   // operand matches. This is for InstAliases which have a fixed-value
5540   // immediate in the syntax.
5541   int64_t ExpectedVal;
5542   switch (Kind) {
5543   default:
5544     return Match_InvalidOperand;
5545   case MCK__35_0:
5546     ExpectedVal = 0;
5547     break;
5548   case MCK__35_1:
5549     ExpectedVal = 1;
5550     break;
5551   case MCK__35_12:
5552     ExpectedVal = 12;
5553     break;
5554   case MCK__35_16:
5555     ExpectedVal = 16;
5556     break;
5557   case MCK__35_2:
5558     ExpectedVal = 2;
5559     break;
5560   case MCK__35_24:
5561     ExpectedVal = 24;
5562     break;
5563   case MCK__35_3:
5564     ExpectedVal = 3;
5565     break;
5566   case MCK__35_32:
5567     ExpectedVal = 32;
5568     break;
5569   case MCK__35_4:
5570     ExpectedVal = 4;
5571     break;
5572   case MCK__35_48:
5573     ExpectedVal = 48;
5574     break;
5575   case MCK__35_6:
5576     ExpectedVal = 6;
5577     break;
5578   case MCK__35_64:
5579     ExpectedVal = 64;
5580     break;
5581   case MCK__35_8:
5582     ExpectedVal = 8;
5583     break;
5584   }
5585   if (!Op.isImm())
5586     return Match_InvalidOperand;
5587   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
5588   if (!CE)
5589     return Match_InvalidOperand;
5590   if (CE->getValue() == ExpectedVal)
5591     return Match_Success;
5592   return Match_InvalidOperand;
5593 }
5594 
5595 OperandMatchResultTy
5596 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5597 
5598   SMLoc S = getLoc();
5599 
5600   if (getParser().getTok().isNot(AsmToken::Identifier)) {
5601     Error(S, "expected register");
5602     return MatchOperand_ParseFail;
5603   }
5604 
5605   unsigned FirstReg;
5606   OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5607   if (Res != MatchOperand_Success)
5608     return MatchOperand_ParseFail;
5609 
5610   const MCRegisterClass &WRegClass =
5611       AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5612   const MCRegisterClass &XRegClass =
5613       AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5614 
5615   bool isXReg = XRegClass.contains(FirstReg),
5616        isWReg = WRegClass.contains(FirstReg);
5617   if (!isXReg && !isWReg) {
5618     Error(S, "expected first even register of a "
5619              "consecutive same-size even/odd register pair");
5620     return MatchOperand_ParseFail;
5621   }
5622 
5623   const MCRegisterInfo *RI = getContext().getRegisterInfo();
5624   unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5625 
5626   if (FirstEncoding & 0x1) {
5627     Error(S, "expected first even register of a "
5628              "consecutive same-size even/odd register pair");
5629     return MatchOperand_ParseFail;
5630   }
5631 
5632   if (getParser().getTok().isNot(AsmToken::Comma)) {
5633     Error(getLoc(), "expected comma");
5634     return MatchOperand_ParseFail;
5635   }
5636   // Eat the comma
5637   getParser().Lex();
5638 
5639   SMLoc E = getLoc();
5640   unsigned SecondReg;
5641   Res = tryParseScalarRegister(SecondReg);
5642   if (Res != MatchOperand_Success)
5643     return MatchOperand_ParseFail;
5644 
5645   if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5646       (isXReg && !XRegClass.contains(SecondReg)) ||
5647       (isWReg && !WRegClass.contains(SecondReg))) {
5648     Error(E,"expected second odd register of a "
5649              "consecutive same-size even/odd register pair");
5650     return MatchOperand_ParseFail;
5651   }
5652 
5653   unsigned Pair = 0;
5654   if (isXReg) {
5655     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5656            &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5657   } else {
5658     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5659            &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5660   }
5661 
5662   Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5663       getLoc(), getContext()));
5664 
5665   return MatchOperand_Success;
5666 }
5667 
5668 template <bool ParseShiftExtend, bool ParseSuffix>
5669 OperandMatchResultTy
5670 AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5671   const SMLoc S = getLoc();
5672   // Check for a SVE vector register specifier first.
5673   unsigned RegNum;
5674   StringRef Kind;
5675 
5676   OperandMatchResultTy Res =
5677       tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5678 
5679   if (Res != MatchOperand_Success)
5680     return Res;
5681 
5682   if (ParseSuffix && Kind.empty())
5683     return MatchOperand_NoMatch;
5684 
5685   const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5686   if (!KindRes)
5687     return MatchOperand_NoMatch;
5688 
5689   unsigned ElementWidth = KindRes->second;
5690 
5691   // No shift/extend is the default.
5692   if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5693     Operands.push_back(AArch64Operand::CreateVectorReg(
5694         RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5695 
5696     OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5697     if (Res == MatchOperand_ParseFail)
5698       return MatchOperand_ParseFail;
5699     return MatchOperand_Success;
5700   }
5701 
5702   // Eat the comma
5703   getParser().Lex();
5704 
5705   // Match the shift
5706   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5707   Res = tryParseOptionalShiftExtend(ExtOpnd);
5708   if (Res != MatchOperand_Success)
5709     return Res;
5710 
5711   auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5712   Operands.push_back(AArch64Operand::CreateVectorReg(
5713       RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5714       getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5715       Ext->hasShiftExtendAmount()));
5716 
5717   return MatchOperand_Success;
5718 }
5719 
5720 OperandMatchResultTy
5721 AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5722   MCAsmParser &Parser = getParser();
5723 
5724   SMLoc SS = getLoc();
5725   const AsmToken &TokE = Parser.getTok();
5726   bool IsHash = TokE.is(AsmToken::Hash);
5727 
5728   if (!IsHash && TokE.isNot(AsmToken::Identifier))
5729     return MatchOperand_NoMatch;
5730 
5731   int64_t Pattern;
5732   if (IsHash) {
5733     Parser.Lex(); // Eat hash
5734 
5735     // Parse the immediate operand.
5736     const MCExpr *ImmVal;
5737     SS = getLoc();
5738     if (Parser.parseExpression(ImmVal))
5739       return MatchOperand_ParseFail;
5740 
5741     auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5742     if (!MCE)
5743       return MatchOperand_ParseFail;
5744 
5745     Pattern = MCE->getValue();
5746   } else {
5747     // Parse the pattern
5748     auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5749     if (!Pat)
5750       return MatchOperand_NoMatch;
5751 
5752     Parser.Lex();
5753     Pattern = Pat->Encoding;
5754     assert(Pattern >= 0 && Pattern < 32);
5755   }
5756 
5757   Operands.push_back(
5758       AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5759                                 SS, getLoc(), getContext()));
5760 
5761   return MatchOperand_Success;
5762 }
5763