//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

65 namespace {
66
67 enum class RegKind {
68 Scalar,
69 NeonVector,
70 SVEDataVector,
71 SVEPredicateAsCounter,
72 SVEPredicateVector,
73 Matrix,
74 LookupTable
75 };
76
/// The forms an SME matrix operand can take: the whole ZA array, a tile, or a
/// tile accessed by row/column slice.
enum class MatrixKind { Array, Tile, Row, Col };

/// How a parsed register must relate to the register class expected by the
/// matcher: the same register, or its super-/sub-register (e.g. GPR64as32).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

85 class AArch64AsmParser : public MCTargetAsmParser {
86 private:
87 StringRef Mnemonic; ///< Instruction mnemonic.
88
89 // Map of register aliases registers via the .req directive.
90 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
91
92 class PrefixInfo {
93 public:
CreateFromInst(const MCInst & Inst,uint64_t TSFlags)94 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
95 PrefixInfo Prefix;
96 switch (Inst.getOpcode()) {
97 case AArch64::MOVPRFX_ZZ:
98 Prefix.Active = true;
99 Prefix.Dst = Inst.getOperand(0).getReg();
100 break;
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
105 Prefix.Active = true;
106 Prefix.Predicated = true;
107 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
108 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
109 "No destructive element size set for movprfx");
110 Prefix.Dst = Inst.getOperand(0).getReg();
111 Prefix.Pg = Inst.getOperand(2).getReg();
112 break;
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
117 Prefix.Active = true;
118 Prefix.Predicated = true;
119 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
120 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
121 "No destructive element size set for movprfx");
122 Prefix.Dst = Inst.getOperand(0).getReg();
123 Prefix.Pg = Inst.getOperand(1).getReg();
124 break;
125 default:
126 break;
127 }
128
129 return Prefix;
130 }
131
132 PrefixInfo() = default;
isActive() const133 bool isActive() const { return Active; }
isPredicated() const134 bool isPredicated() const { return Predicated; }
getElementSize() const135 unsigned getElementSize() const {
136 assert(Predicated);
137 return ElementSize;
138 }
getDstReg() const139 unsigned getDstReg() const { return Dst; }
getPgReg() const140 unsigned getPgReg() const {
141 assert(Predicated);
142 return Pg;
143 }
144
145 private:
146 bool Active = false;
147 bool Predicated = false;
148 unsigned ElementSize;
149 unsigned Dst;
150 unsigned Pg;
151 } NextPrefix;
152
getTargetStreamer()153 AArch64TargetStreamer &getTargetStreamer() {
154 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
155 return static_cast<AArch64TargetStreamer &>(TS);
156 }
157
getLoc() const158 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
159
160 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
161 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
163 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
164 std::string &Suggestion);
165 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
166 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
167 bool parseRegister(OperandVector &Operands);
168 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
169 bool parseNeonVectorList(OperandVector &Operands);
170 bool parseOptionalMulOperand(OperandVector &Operands);
171 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
172 bool parseKeywordOperand(OperandVector &Operands);
173 bool parseOperand(OperandVector &Operands, bool isCondCode,
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
176 bool parseComma();
177 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
178 unsigned Last);
179
180 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
181 OperandVector &Operands);
182
183 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
184
185 bool parseDirectiveArch(SMLoc L);
186 bool parseDirectiveArchExtension(SMLoc L);
187 bool parseDirectiveCPU(SMLoc L);
188 bool parseDirectiveInst(SMLoc L);
189
190 bool parseDirectiveTLSDescCall(SMLoc L);
191
192 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
193 bool parseDirectiveLtorg(SMLoc L);
194
195 bool parseDirectiveReq(StringRef Name, SMLoc L);
196 bool parseDirectiveUnreq(SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFIBKeyFrame();
199 bool parseDirectiveCFIMTETaggedFrame();
200
201 bool parseDirectiveVariantPCS(SMLoc L);
202
203 bool parseDirectiveSEHAllocStack(SMLoc L);
204 bool parseDirectiveSEHPrologEnd(SMLoc L);
205 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
206 bool parseDirectiveSEHSaveFPLR(SMLoc L);
207 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
208 bool parseDirectiveSEHSaveReg(SMLoc L);
209 bool parseDirectiveSEHSaveRegX(SMLoc L);
210 bool parseDirectiveSEHSaveRegP(SMLoc L);
211 bool parseDirectiveSEHSaveRegPX(SMLoc L);
212 bool parseDirectiveSEHSaveLRPair(SMLoc L);
213 bool parseDirectiveSEHSaveFReg(SMLoc L);
214 bool parseDirectiveSEHSaveFRegX(SMLoc L);
215 bool parseDirectiveSEHSaveFRegP(SMLoc L);
216 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
217 bool parseDirectiveSEHSetFP(SMLoc L);
218 bool parseDirectiveSEHAddFP(SMLoc L);
219 bool parseDirectiveSEHNop(SMLoc L);
220 bool parseDirectiveSEHSaveNext(SMLoc L);
221 bool parseDirectiveSEHEpilogStart(SMLoc L);
222 bool parseDirectiveSEHEpilogEnd(SMLoc L);
223 bool parseDirectiveSEHTrapFrame(SMLoc L);
224 bool parseDirectiveSEHMachineFrame(SMLoc L);
225 bool parseDirectiveSEHContext(SMLoc L);
226 bool parseDirectiveSEHECContext(SMLoc L);
227 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
228 bool parseDirectiveSEHPACSignLR(SMLoc L);
229 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
230
231 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
232 SmallVectorImpl<SMLoc> &Loc);
233 unsigned getNumRegsForRegKind(RegKind K);
234 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
235 OperandVector &Operands, MCStreamer &Out,
236 uint64_t &ErrorInfo,
237 bool MatchingInlineAsm) override;
238 /// @name Auto-generated Match Functions
239 /// {
240
241 #define GET_ASSEMBLER_HEADER
242 #include "AArch64GenAsmMatcher.inc"
243
244 /// }
245
246 ParseStatus tryParseScalarRegister(MCRegister &Reg);
247 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
248 RegKind MatchKind);
249 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
250 ParseStatus tryParseSVCR(OperandVector &Operands);
251 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
252 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
253 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
254 ParseStatus tryParseSysReg(OperandVector &Operands);
255 ParseStatus tryParseSysCROperand(OperandVector &Operands);
256 template <bool IsSVEPrefetch = false>
257 ParseStatus tryParsePrefetch(OperandVector &Operands);
258 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
259 ParseStatus tryParsePSBHint(OperandVector &Operands);
260 ParseStatus tryParseBTIHint(OperandVector &Operands);
261 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
262 ParseStatus tryParseAdrLabel(OperandVector &Operands);
263 template <bool AddFPZeroAsLiteral>
264 ParseStatus tryParseFPImm(OperandVector &Operands);
265 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
266 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
267 bool tryParseNeonVectorRegister(OperandVector &Operands);
268 ParseStatus tryParseVectorIndex(OperandVector &Operands);
269 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
270 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
271 template <bool ParseShiftExtend,
272 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
273 ParseStatus tryParseGPROperand(OperandVector &Operands);
274 ParseStatus tryParseZTOperand(OperandVector &Operands);
275 template <bool ParseShiftExtend, bool ParseSuffix>
276 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
277 template <RegKind RK>
278 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
279 ParseStatus
280 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
281 template <RegKind VectorKind>
282 ParseStatus tryParseVectorList(OperandVector &Operands,
283 bool ExpectMatch = false);
284 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
285 ParseStatus tryParseSVEPattern(OperandVector &Operands);
286 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
287 ParseStatus tryParseGPR64x8(OperandVector &Operands);
288 ParseStatus tryParseImmRange(OperandVector &Operands);
289
290 public:
291 enum AArch64MatchResultTy {
292 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
293 #define GET_OPERAND_DIAGNOSTIC_TYPES
294 #include "AArch64GenAsmMatcher.inc"
295 };
296 bool IsILP32;
297 bool IsWindowsArm64EC;
298
AArch64AsmParser(const MCSubtargetInfo & STI,MCAsmParser & Parser,const MCInstrInfo & MII,const MCTargetOptions & Options)299 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
300 const MCInstrInfo &MII, const MCTargetOptions &Options)
301 : MCTargetAsmParser(Options, STI, MII) {
302 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
303 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
304 MCAsmParserExtension::Initialize(Parser);
305 MCStreamer &S = getParser().getStreamer();
306 if (S.getTargetStreamer() == nullptr)
307 new AArch64TargetStreamer(S);
308
309 // Alias .hword/.word/.[dx]word to the target-independent
310 // .2byte/.4byte/.8byte directives as they have the same form and
311 // semantics:
312 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
313 Parser.addAliasForDirective(".hword", ".2byte");
314 Parser.addAliasForDirective(".word", ".4byte");
315 Parser.addAliasForDirective(".dword", ".8byte");
316 Parser.addAliasForDirective(".xword", ".8byte");
317
318 // Initialize the set of available features.
319 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
320 }
321
322 bool areEqualRegs(const MCParsedAsmOperand &Op1,
323 const MCParsedAsmOperand &Op2) const override;
324 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
325 SMLoc NameLoc, OperandVector &Operands) override;
326 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
327 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
328 SMLoc &EndLoc) override;
329 bool ParseDirective(AsmToken DirectiveID) override;
330 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
331 unsigned Kind) override;
332
333 bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;
334
335 static bool classifySymbolRef(const MCExpr *Expr,
336 AArch64MCExpr::VariantKind &ELFRefKind,
337 MCSymbolRefExpr::VariantKind &DarwinRefKind,
338 int64_t &Addend);
339 };
340
/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction operand.
343 class AArch64Operand : public MCParsedAsmOperand {
344 private:
345 enum KindTy {
346 k_Immediate,
347 k_ShiftedImm,
348 k_ImmRange,
349 k_CondCode,
350 k_Register,
351 k_MatrixRegister,
352 k_MatrixTileList,
353 k_SVCR,
354 k_VectorList,
355 k_VectorIndex,
356 k_Token,
357 k_SysReg,
358 k_SysCR,
359 k_Prefetch,
360 k_ShiftExtend,
361 k_FPImm,
362 k_Barrier,
363 k_PSBHint,
364 k_BTIHint,
365 } Kind;
366
367 SMLoc StartLoc, EndLoc;
368
369 struct TokOp {
370 const char *Data;
371 unsigned Length;
372 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
373 };
374
375 // Separate shift/extend operand.
376 struct ShiftExtendOp {
377 AArch64_AM::ShiftExtendType Type;
378 unsigned Amount;
379 bool HasExplicitAmount;
380 };
381
382 struct RegOp {
383 unsigned RegNum;
384 RegKind Kind;
385 int ElementWidth;
386
387 // The register may be allowed as a different register class,
388 // e.g. for GPR64as32 or GPR32as64.
389 RegConstraintEqualityTy EqualityTy;
390
391 // In some cases the shift/extend needs to be explicitly parsed together
392 // with the register, rather than as a separate operand. This is needed
393 // for addressing modes where the instruction as a whole dictates the
394 // scaling/extend, rather than specific bits in the instruction.
395 // By parsing them as a single operand, we avoid the need to pass an
396 // extra operand in all CodeGen patterns (because all operands need to
397 // have an associated value), and we avoid the need to update TableGen to
398 // accept operands that have no associated bits in the instruction.
399 //
400 // An added benefit of parsing them together is that the assembler
401 // can give a sensible diagnostic if the scaling is not correct.
402 //
403 // The default is 'lsl #0' (HasExplicitAmount = false) if no
404 // ShiftExtend is specified.
405 ShiftExtendOp ShiftExtend;
406 };
407
408 struct MatrixRegOp {
409 unsigned RegNum;
410 unsigned ElementWidth;
411 MatrixKind Kind;
412 };
413
414 struct MatrixTileListOp {
415 unsigned RegMask = 0;
416 };
417
418 struct VectorListOp {
419 unsigned RegNum;
420 unsigned Count;
421 unsigned Stride;
422 unsigned NumElements;
423 unsigned ElementWidth;
424 RegKind RegisterKind;
425 };
426
427 struct VectorIndexOp {
428 int Val;
429 };
430
431 struct ImmOp {
432 const MCExpr *Val;
433 };
434
435 struct ShiftedImmOp {
436 const MCExpr *Val;
437 unsigned ShiftAmount;
438 };
439
440 struct ImmRangeOp {
441 unsigned First;
442 unsigned Last;
443 };
444
445 struct CondCodeOp {
446 AArch64CC::CondCode Code;
447 };
448
449 struct FPImmOp {
450 uint64_t Val; // APFloat value bitcasted to uint64_t.
451 bool IsExact; // describes whether parsed value was exact.
452 };
453
454 struct BarrierOp {
455 const char *Data;
456 unsigned Length;
457 unsigned Val; // Not the enum since not all values have names.
458 bool HasnXSModifier;
459 };
460
461 struct SysRegOp {
462 const char *Data;
463 unsigned Length;
464 uint32_t MRSReg;
465 uint32_t MSRReg;
466 uint32_t PStateField;
467 };
468
469 struct SysCRImmOp {
470 unsigned Val;
471 };
472
473 struct PrefetchOp {
474 const char *Data;
475 unsigned Length;
476 unsigned Val;
477 };
478
479 struct PSBHintOp {
480 const char *Data;
481 unsigned Length;
482 unsigned Val;
483 };
484
485 struct BTIHintOp {
486 const char *Data;
487 unsigned Length;
488 unsigned Val;
489 };
490
491 struct SVCROp {
492 const char *Data;
493 unsigned Length;
494 unsigned PStateField;
495 };
496
497 union {
498 struct TokOp Tok;
499 struct RegOp Reg;
500 struct MatrixRegOp MatrixReg;
501 struct MatrixTileListOp MatrixTileList;
502 struct VectorListOp VectorList;
503 struct VectorIndexOp VectorIndex;
504 struct ImmOp Imm;
505 struct ShiftedImmOp ShiftedImm;
506 struct ImmRangeOp ImmRange;
507 struct CondCodeOp CondCode;
508 struct FPImmOp FPImm;
509 struct BarrierOp Barrier;
510 struct SysRegOp SysReg;
511 struct SysCRImmOp SysCRImm;
512 struct PrefetchOp Prefetch;
513 struct PSBHintOp PSBHint;
514 struct BTIHintOp BTIHint;
515 struct ShiftExtendOp ShiftExtend;
516 struct SVCROp SVCR;
517 };
518
519 // Keep the MCContext around as the MCExprs may need manipulated during
520 // the add<>Operands() calls.
521 MCContext &Ctx;
522
523 public:
AArch64Operand(KindTy K,MCContext & Ctx)524 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
525
AArch64Operand(const AArch64Operand & o)526 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
527 Kind = o.Kind;
528 StartLoc = o.StartLoc;
529 EndLoc = o.EndLoc;
530 switch (Kind) {
531 case k_Token:
532 Tok = o.Tok;
533 break;
534 case k_Immediate:
535 Imm = o.Imm;
536 break;
537 case k_ShiftedImm:
538 ShiftedImm = o.ShiftedImm;
539 break;
540 case k_ImmRange:
541 ImmRange = o.ImmRange;
542 break;
543 case k_CondCode:
544 CondCode = o.CondCode;
545 break;
546 case k_FPImm:
547 FPImm = o.FPImm;
548 break;
549 case k_Barrier:
550 Barrier = o.Barrier;
551 break;
552 case k_Register:
553 Reg = o.Reg;
554 break;
555 case k_MatrixRegister:
556 MatrixReg = o.MatrixReg;
557 break;
558 case k_MatrixTileList:
559 MatrixTileList = o.MatrixTileList;
560 break;
561 case k_VectorList:
562 VectorList = o.VectorList;
563 break;
564 case k_VectorIndex:
565 VectorIndex = o.VectorIndex;
566 break;
567 case k_SysReg:
568 SysReg = o.SysReg;
569 break;
570 case k_SysCR:
571 SysCRImm = o.SysCRImm;
572 break;
573 case k_Prefetch:
574 Prefetch = o.Prefetch;
575 break;
576 case k_PSBHint:
577 PSBHint = o.PSBHint;
578 break;
579 case k_BTIHint:
580 BTIHint = o.BTIHint;
581 break;
582 case k_ShiftExtend:
583 ShiftExtend = o.ShiftExtend;
584 break;
585 case k_SVCR:
586 SVCR = o.SVCR;
587 break;
588 }
589 }
590
591 /// getStartLoc - Get the location of the first token of this operand.
getStartLoc() const592 SMLoc getStartLoc() const override { return StartLoc; }
593 /// getEndLoc - Get the location of the last token of this operand.
getEndLoc() const594 SMLoc getEndLoc() const override { return EndLoc; }
595
getToken() const596 StringRef getToken() const {
597 assert(Kind == k_Token && "Invalid access!");
598 return StringRef(Tok.Data, Tok.Length);
599 }
600
isTokenSuffix() const601 bool isTokenSuffix() const {
602 assert(Kind == k_Token && "Invalid access!");
603 return Tok.IsSuffix;
604 }
605
getImm() const606 const MCExpr *getImm() const {
607 assert(Kind == k_Immediate && "Invalid access!");
608 return Imm.Val;
609 }
610
getShiftedImmVal() const611 const MCExpr *getShiftedImmVal() const {
612 assert(Kind == k_ShiftedImm && "Invalid access!");
613 return ShiftedImm.Val;
614 }
615
getShiftedImmShift() const616 unsigned getShiftedImmShift() const {
617 assert(Kind == k_ShiftedImm && "Invalid access!");
618 return ShiftedImm.ShiftAmount;
619 }
620
getFirstImmVal() const621 unsigned getFirstImmVal() const {
622 assert(Kind == k_ImmRange && "Invalid access!");
623 return ImmRange.First;
624 }
625
getLastImmVal() const626 unsigned getLastImmVal() const {
627 assert(Kind == k_ImmRange && "Invalid access!");
628 return ImmRange.Last;
629 }
630
getCondCode() const631 AArch64CC::CondCode getCondCode() const {
632 assert(Kind == k_CondCode && "Invalid access!");
633 return CondCode.Code;
634 }
635
getFPImm() const636 APFloat getFPImm() const {
637 assert (Kind == k_FPImm && "Invalid access!");
638 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
639 }
640
getFPImmIsExact() const641 bool getFPImmIsExact() const {
642 assert (Kind == k_FPImm && "Invalid access!");
643 return FPImm.IsExact;
644 }
645
getBarrier() const646 unsigned getBarrier() const {
647 assert(Kind == k_Barrier && "Invalid access!");
648 return Barrier.Val;
649 }
650
getBarrierName() const651 StringRef getBarrierName() const {
652 assert(Kind == k_Barrier && "Invalid access!");
653 return StringRef(Barrier.Data, Barrier.Length);
654 }
655
getBarriernXSModifier() const656 bool getBarriernXSModifier() const {
657 assert(Kind == k_Barrier && "Invalid access!");
658 return Barrier.HasnXSModifier;
659 }
660
getReg() const661 MCRegister getReg() const override {
662 assert(Kind == k_Register && "Invalid access!");
663 return Reg.RegNum;
664 }
665
getMatrixReg() const666 unsigned getMatrixReg() const {
667 assert(Kind == k_MatrixRegister && "Invalid access!");
668 return MatrixReg.RegNum;
669 }
670
getMatrixElementWidth() const671 unsigned getMatrixElementWidth() const {
672 assert(Kind == k_MatrixRegister && "Invalid access!");
673 return MatrixReg.ElementWidth;
674 }
675
getMatrixKind() const676 MatrixKind getMatrixKind() const {
677 assert(Kind == k_MatrixRegister && "Invalid access!");
678 return MatrixReg.Kind;
679 }
680
getMatrixTileListRegMask() const681 unsigned getMatrixTileListRegMask() const {
682 assert(isMatrixTileList() && "Invalid access!");
683 return MatrixTileList.RegMask;
684 }
685
getRegEqualityTy() const686 RegConstraintEqualityTy getRegEqualityTy() const {
687 assert(Kind == k_Register && "Invalid access!");
688 return Reg.EqualityTy;
689 }
690
getVectorListStart() const691 unsigned getVectorListStart() const {
692 assert(Kind == k_VectorList && "Invalid access!");
693 return VectorList.RegNum;
694 }
695
getVectorListCount() const696 unsigned getVectorListCount() const {
697 assert(Kind == k_VectorList && "Invalid access!");
698 return VectorList.Count;
699 }
700
getVectorListStride() const701 unsigned getVectorListStride() const {
702 assert(Kind == k_VectorList && "Invalid access!");
703 return VectorList.Stride;
704 }
705
getVectorIndex() const706 int getVectorIndex() const {
707 assert(Kind == k_VectorIndex && "Invalid access!");
708 return VectorIndex.Val;
709 }
710
getSysReg() const711 StringRef getSysReg() const {
712 assert(Kind == k_SysReg && "Invalid access!");
713 return StringRef(SysReg.Data, SysReg.Length);
714 }
715
getSysCR() const716 unsigned getSysCR() const {
717 assert(Kind == k_SysCR && "Invalid access!");
718 return SysCRImm.Val;
719 }
720
getPrefetch() const721 unsigned getPrefetch() const {
722 assert(Kind == k_Prefetch && "Invalid access!");
723 return Prefetch.Val;
724 }
725
getPSBHint() const726 unsigned getPSBHint() const {
727 assert(Kind == k_PSBHint && "Invalid access!");
728 return PSBHint.Val;
729 }
730
getPSBHintName() const731 StringRef getPSBHintName() const {
732 assert(Kind == k_PSBHint && "Invalid access!");
733 return StringRef(PSBHint.Data, PSBHint.Length);
734 }
735
getBTIHint() const736 unsigned getBTIHint() const {
737 assert(Kind == k_BTIHint && "Invalid access!");
738 return BTIHint.Val;
739 }
740
getBTIHintName() const741 StringRef getBTIHintName() const {
742 assert(Kind == k_BTIHint && "Invalid access!");
743 return StringRef(BTIHint.Data, BTIHint.Length);
744 }
745
getSVCR() const746 StringRef getSVCR() const {
747 assert(Kind == k_SVCR && "Invalid access!");
748 return StringRef(SVCR.Data, SVCR.Length);
749 }
750
getPrefetchName() const751 StringRef getPrefetchName() const {
752 assert(Kind == k_Prefetch && "Invalid access!");
753 return StringRef(Prefetch.Data, Prefetch.Length);
754 }
755
getShiftExtendType() const756 AArch64_AM::ShiftExtendType getShiftExtendType() const {
757 if (Kind == k_ShiftExtend)
758 return ShiftExtend.Type;
759 if (Kind == k_Register)
760 return Reg.ShiftExtend.Type;
761 llvm_unreachable("Invalid access!");
762 }
763
getShiftExtendAmount() const764 unsigned getShiftExtendAmount() const {
765 if (Kind == k_ShiftExtend)
766 return ShiftExtend.Amount;
767 if (Kind == k_Register)
768 return Reg.ShiftExtend.Amount;
769 llvm_unreachable("Invalid access!");
770 }
771
hasShiftExtendAmount() const772 bool hasShiftExtendAmount() const {
773 if (Kind == k_ShiftExtend)
774 return ShiftExtend.HasExplicitAmount;
775 if (Kind == k_Register)
776 return Reg.ShiftExtend.HasExplicitAmount;
777 llvm_unreachable("Invalid access!");
778 }
779
isImm() const780 bool isImm() const override { return Kind == k_Immediate; }
isMem() const781 bool isMem() const override { return false; }
782
isUImm6() const783 bool isUImm6() const {
784 if (!isImm())
785 return false;
786 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
787 if (!MCE)
788 return false;
789 int64_t Val = MCE->getValue();
790 return (Val >= 0 && Val < 64);
791 }
792
isSImm() const793 template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
794
isSImmScaled() const795 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
796 return isImmScaled<Bits, Scale>(true);
797 }
798
799 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
isUImmScaled() const800 DiagnosticPredicate isUImmScaled() const {
801 if (IsRange && isImmRange() &&
802 (getLastImmVal() != getFirstImmVal() + Offset))
803 return DiagnosticPredicateTy::NoMatch;
804
805 return isImmScaled<Bits, Scale, IsRange>(false);
806 }
807
808 template <int Bits, int Scale, bool IsRange = false>
isImmScaled(bool Signed) const809 DiagnosticPredicate isImmScaled(bool Signed) const {
810 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
811 (isImmRange() && !IsRange))
812 return DiagnosticPredicateTy::NoMatch;
813
814 int64_t Val;
815 if (isImmRange())
816 Val = getFirstImmVal();
817 else {
818 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
819 if (!MCE)
820 return DiagnosticPredicateTy::NoMatch;
821 Val = MCE->getValue();
822 }
823
824 int64_t MinVal, MaxVal;
825 if (Signed) {
826 int64_t Shift = Bits - 1;
827 MinVal = (int64_t(1) << Shift) * -Scale;
828 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
829 } else {
830 MinVal = 0;
831 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
832 }
833
834 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
835 return DiagnosticPredicateTy::Match;
836
837 return DiagnosticPredicateTy::NearMatch;
838 }
839
isSVEPattern() const840 DiagnosticPredicate isSVEPattern() const {
841 if (!isImm())
842 return DiagnosticPredicateTy::NoMatch;
843 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
844 if (!MCE)
845 return DiagnosticPredicateTy::NoMatch;
846 int64_t Val = MCE->getValue();
847 if (Val >= 0 && Val < 32)
848 return DiagnosticPredicateTy::Match;
849 return DiagnosticPredicateTy::NearMatch;
850 }
851
isSVEVecLenSpecifier() const852 DiagnosticPredicate isSVEVecLenSpecifier() const {
853 if (!isImm())
854 return DiagnosticPredicateTy::NoMatch;
855 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
856 if (!MCE)
857 return DiagnosticPredicateTy::NoMatch;
858 int64_t Val = MCE->getValue();
859 if (Val >= 0 && Val <= 1)
860 return DiagnosticPredicateTy::Match;
861 return DiagnosticPredicateTy::NearMatch;
862 }
863
isSymbolicUImm12Offset(const MCExpr * Expr) const864 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
865 AArch64MCExpr::VariantKind ELFRefKind;
866 MCSymbolRefExpr::VariantKind DarwinRefKind;
867 int64_t Addend;
868 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
869 Addend)) {
870 // If we don't understand the expression, assume the best and
871 // let the fixup and relocation code deal with it.
872 return true;
873 }
874
875 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
876 ELFRefKind == AArch64MCExpr::VK_LO12 ||
877 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
878 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
879 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
880 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
881 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
882 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
883 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
884 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
885 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
886 ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
887 // Note that we don't range-check the addend. It's adjusted modulo page
888 // size when converted, so there is no "out of range" condition when using
889 // @pageoff.
890 return true;
891 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
892 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
893 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
894 return Addend == 0;
895 }
896
897 return false;
898 }
899
isUImm12Offset() const900 template <int Scale> bool isUImm12Offset() const {
901 if (!isImm())
902 return false;
903
904 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
905 if (!MCE)
906 return isSymbolicUImm12Offset(getImm());
907
908 int64_t Val = MCE->getValue();
909 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
910 }
911
912 template <int N, int M>
isImmInRange() const913 bool isImmInRange() const {
914 if (!isImm())
915 return false;
916 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
917 if (!MCE)
918 return false;
919 int64_t Val = MCE->getValue();
920 return (Val >= N && Val <= M);
921 }
922
923 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
924 // a logical immediate can always be represented when inverted.
925 template <typename T>
isLogicalImm() const926 bool isLogicalImm() const {
927 if (!isImm())
928 return false;
929 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
930 if (!MCE)
931 return false;
932
933 int64_t Val = MCE->getValue();
934 // Avoid left shift by 64 directly.
935 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
936 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
937 if ((Val & Upper) && (Val & Upper) != Upper)
938 return false;
939
940 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
941 }
942
isShiftedImm() const943 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
944
isImmRange() const945 bool isImmRange() const { return Kind == k_ImmRange; }
946
947 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
948 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
949 /// immediate that can be shifted by 'Shift'.
950 template <unsigned Width>
getShiftedVal() const951 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
952 if (isShiftedImm() && Width == getShiftedImmShift())
953 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
954 return std::make_pair(CE->getValue(), Width);
955
956 if (isImm())
957 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
958 int64_t Val = CE->getValue();
959 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
960 return std::make_pair(Val >> Width, Width);
961 else
962 return std::make_pair(Val, 0u);
963 }
964
965 return {};
966 }
967
  /// True if the operand is valid as the immediate of an ADD/SUB instruction:
  /// either a 12-bit unsigned constant (optionally shifted left by 12), or a
  /// symbolic reference whose relocation produces such an immediate.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic expressions are accepted only with modifiers that resolve to a
    // 12-bit page-offset-style value.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1012
  /// True if the operand is a negative constant whose magnitude fits the
  /// 12-bit ADD/SUB immediate (used to flip ADD <-> SUB aliases).
  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }
1023
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements never take a shifted (lsl #8) form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1044
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements never take a shifted (lsl #8) form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1062
  /// Matches a logical immediate that is NOT also a valid SVE CPY immediate;
  /// the CPY form is preferred when both encodings are possible.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1068
  /// True if the operand is a condition code (eq, ne, ...).
  bool isCondCode() const { return Kind == k_CondCode; }

  /// True if the operand is a constant encodable as an AdvSIMD modified
  /// immediate of type 10 (64-bit per-byte mask).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
1079
  /// True if the operand can be an N-bit PC-relative branch target.
  /// Non-constant expressions are accepted and left to the fixup code; a
  /// constant must be 4-byte aligned and fit in N bits (as a word offset).
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch offsets are instruction (word) aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1093
  /// True if the operand is a symbolic expression carrying one of the given
  /// ELF MOVW/MOVK modifiers (e.g. :abs_g1:). Darwin modifiers never match.
  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    return llvm::is_contained(AllowedModifiers, ELFRefKind);
  }
1111
  /// MOVW operand with a modifier selecting bits [63:48] (g3).
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  /// MOVW operand with a modifier selecting bits [47:32] (g2).
  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  /// MOVW operand with a modifier selecting bits [31:16] (g1).
  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  /// MOVW operand with a modifier selecting bits [15:0] (g0).
  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1141
  /// True if the operand can be encoded via the MOV alias of MOVZ with the
  /// given register width and shift.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }

  /// True if the operand can be encoded via the MOV alias of MOVN with the
  /// given register width and shift. Unlike MOVZ, expressions never match.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
1167
  /// True if the operand is an FP constant encodable as an 8-bit FMOV
  /// immediate (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  /// Barrier operand without the nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  /// Barrier operand carrying the nXS qualifier (DSB ... nXS).
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  /// True if the operand names a system register.
  bool isSysReg() const { return Kind == k_SysReg; }
1180
  /// System register readable via MRS (-1U marks "not valid for MRS").
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  /// System register writable via MSR.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  /// PSTATE field taking a 1-bit immediate (MSR pstatefield, #imm).
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  /// PSTATE field taking a 4-bit immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  /// SME streaming-mode / ZA control register operand (SMSTART/SMSTOP).
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1208
  /// Generic MCParsedAsmOperand register check.
  bool isReg() const override {
    return Kind == k_Register;
  }

  /// True if the operand is a vector register list ({v0.8b, v1.8b, ...}).
  bool isVectorList() const { return Kind == k_VectorList; }

  /// Scalar (general-purpose or FP) register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  /// NEON vector register (Vn).
  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  /// NEON vector register restricted to the lower half of the register file
  /// (V0-V15), as required by some indexed-element instructions.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  /// NEON vector register restricted to V0-V7.
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }

  /// SME matrix (ZA tile) register operand.
  bool isMatrix() const { return Kind == k_MatrixRegister; }
  /// SME matrix tile list operand ({za0.d, za1.d}).
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1239
  /// True if the operand is a predicate-as-counter register (PNn) belonging
  /// to the given register class.
  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateAsCounter;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1258
  /// True if the operand is an SVE data (Z) or predicate (P) register in the
  /// given register class; the class determines which register kind applies.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1282
  /// Scalar FP register used where an SVE Z register is expected.
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  /// SVE predicate register with the given element width (e.g. p0.b).
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Either a predicate (Pn) or predicate-as-counter (PNn) register of the
  /// given element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
      return DiagnosticPredicateTy::NoMatch;

    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Predicate-as-counter register with the given element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// SVE data (Z) register with the given element width (e.g. z0.s).
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1334
  /// SVE data register used as an (extended, scaled) index register, e.g.
  /// [x0, z1.d, lsl #3]. Checks register class, element width, extend type
  /// and that the shift amount matches the access width.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1357
  /// 64-bit GPR written where a 32-bit register is expected (Wn alias check).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  /// 32-bit GPR written where a 64-bit register is expected.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  /// Consecutive GPR64 octuple (x8-x15 style tuple) register.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  /// Even/odd sequential pair of W registers (e.g. CASP).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  /// Even/odd sequential pair of X registers.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  /// XZR used as the register pair operand of SYSP.
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }
1389
  /// Rotation operand of complex-number instructions: a constant that is
  /// congruent to Remainder modulo Angle and no larger than 270.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1402
  /// Scalar register belonging to the given 64-bit GPR register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  /// GPR64 with an LSL shift-extend whose amount matches the access width
  /// (log2 of ExtWidth bytes), e.g. [x0, x1, lsl #3] for 64-bit accesses.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1418
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  /// Vector list whose register kind, count, element count/width and stride
  /// all match the template parameters exactly.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    if (VectorList.Stride != Stride)
      return false;
    return VectorList.NumElements == NumElements;
  }
1443
  /// Typed vector list whose first register is aligned to a multiple of the
  /// list length (e.g. {z0-z3} but not {z1-z4} for NumRegs == 4).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  /// Strided typed vector list; the first register must lie in the low
  /// Stride registers of either half of the Z register file
  /// (Z0..Z0+Stride-1 or Z16..Z16+Stride-1).
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1469
  /// Vector element index (v0.s[2]) within the inclusive range [Min, Max].
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1478
  /// Generic MCParsedAsmOperand token check.
  bool isToken() const override { return Kind == k_Token; }

  /// True if the operand is a token with exactly the given spelling.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  /// System control register operand (Cn) for SYS/SYSL.
  bool isSysCR() const { return Kind == k_SysCR; }
  /// Prefetch-operation operand (PLDL1KEEP etc).
  bool isPrefetch() const { return Kind == k_Prefetch; }
  /// PSB hint operand.
  bool isPSBHint() const { return Kind == k_PSBHint; }
  /// BTI hint operand.
  bool isBTIHint() const { return Kind == k_BTIHint; }
  /// Any shift or extend specifier.
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
isShifter() const1489 bool isShifter() const {
1490 if (!isShiftExtend())
1491 return false;
1492
1493 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1494 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1495 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1496 ST == AArch64_AM::MSL);
1497 }
1498
  /// True if the operand is exactly the FP constant described by the tablegen
  /// ExactFPImm entry ImmEnum (compared bitwise, so no rounding slack).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Matches either of two exact FP immediates (e.g. 0.5 or 1.0).
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    // Preserve the NearMatch/NoMatch quality of the last attempt.
    return Res;
  }
1531
  /// True if the operand is a register-extend specifier (UXTB..SXTX, or LSL
  /// as the UXTX alias) with an amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  /// Extend valid for a 64-bit instruction taking a 32-bit source register.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  /// Extend taking a 64-bit source register (UXTX/SXTX/LSL, amount <= 4).
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  /// LSL with a 3-bit amount (0-7).
  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }
1570
  /// Memory-operand extend of a 64-bit index register (LSL/SXTX) whose amount
  /// is either 0 or log2 of the access width in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  /// Memory-operand extend of a 32-bit index register (UXTW/SXTW), same
  /// amount rule as isMemXExtend.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1588
  /// Shift usable by arithmetic (add/sub) instructions: LSL/LSR/ASR with an
  /// amount below the register width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  /// Shift usable by logical (and/orr/eor) instructions: additionally ROR.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1611
  /// Shifter valid for a 32-bit MOVZ/MOVN/MOVK immediate.
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1623
  /// Shifter valid for a 64-bit MOVZ/MOVN/MOVK immediate.
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1635
  /// Shifter for vector logical modified immediates (word form).
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  /// Shifter for vector logical modified immediates (half-word form).
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1655
  /// Shifter for MOVI-style "shifting ones" immediates.
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL left shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1665
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1675
  /// Check an ADRP label operand: symbolic expressions always pass (the
  /// relocation handles them); a constant must be 4 KiB-aligned and fit the
  /// signed 21-bit page offset.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // 21-bit signed page count, each page being 4096 bytes.
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }
1691
isAdrLabel() const1692 bool isAdrLabel() const {
1693 // Validation was handled during parsing, so we just verify that
1694 // something didn't go haywire.
1695 if (!isImm())
1696 return false;
1697
1698 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1699 int64_t Val = CE->getValue();
1700 int64_t Min = - (1LL << (21 - 1));
1701 int64_t Max = ((1LL << (21 - 1)) - 1);
1702 return Val >= Min && Val <= Max;
1703 }
1704
1705 return true;
1706 }
1707
  /// SME matrix register operand with the expected tile kind, element size,
  /// and register class.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1718
  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates as signing instr
    // should be in a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    // Non-constant expressions are deferred to the fixup code.
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Must be word aligned.
    if (Val & 0b11)
      return false;
    // Non-positive, fitting the signed 16-bit word-scaled field.
    return (Val <= 0) && (Val > -(1 << 18));
  }
1733
  /// Append Expr to Inst, folding constants into immediate operands.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }
1743
  /// Append this operand's register to Inst.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// Append this operand's matrix (ZA) register to Inst.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1753
  /// Append the W register with the same encoding as the held X register
  /// (e.g. X3 -> W3).
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  /// Append the X register with the same encoding as the held W register
  /// (e.g. W3 -> X3).
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1777
1778 template <int Width>
addFPRasZPRRegOperands(MCInst & Inst,unsigned N) const1779 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1780 unsigned Base;
1781 switch (Width) {
1782 case 8: Base = AArch64::B0; break;
1783 case 16: Base = AArch64::H0; break;
1784 case 32: Base = AArch64::S0; break;
1785 case 64: Base = AArch64::D0; break;
1786 case 128: Base = AArch64::Q0; break;
1787 default:
1788 llvm_unreachable("Unsupported width");
1789 }
1790 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1791 }
1792
  /// Append the register, normalising a predicate-as-counter register (PNn)
  /// to the corresponding predicate register (Pn).
  void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    // Normalise to PPR
    if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
      Reg = Reg - AArch64::PN0 + AArch64::P0;
    Inst.addOperand(MCOperand::createReg(Reg));
  }

  /// Append the Pn register with the same index as the held PNn register.
  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }
1807
  /// Append the D register aliasing the low half of the held Q register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  /// Append the held 128-bit Q register unchanged.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// Append the held register (lower-half vector register class operand).
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// Append the held register (V0-V7 vector register class operand).
  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1831
  // Selects the register bank of a vector list; used as the first index into
  // the FirstRegs table in addVectorListOperands below.
  enum VecListIndexType {
    VecListIdx_DReg = 0, // 64-bit NEON D registers
    VecListIdx_QReg = 1, // 128-bit NEON Q registers
    VecListIdx_ZReg = 2, // SVE Z registers
    VecListIdx_PReg = 3, // SVE predicate registers
  };
1838
  // Add a register-list operand (e.g. "{ v0.8b, v1.8b }"). Lists are modeled
  // as single tuple registers (D0_D1, Z0_Z1_Z2, ...), so translate the first
  // register of the list into the corresponding tuple register.
  //
  // FirstRegs[RegTy][NumRegs] is the first tuple register of the requested
  // width; FirstRegs[RegTy][0] is the base single register that
  // getVectorListStart() is measured against. NumRegs == 1 maps to the
  // plain (non-tuple) register class.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0,       AArch64::P0_P1 }
    };

    // The table rows above only cover the tuple sizes each bank supports.
    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1866
  // Add a strided multi-vector list (SME2), e.g. "{ z0.d, z8.d }". Strided
  // tuples are modeled as dedicated pseudo registers (Z0_Z8,
  // Z0_Z4_Z8_Z12, ...), selected here from the first register of the list.
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      // Two-register lists start in Z0-Z7 (tuples based at Z0_Z8) or in
      // Z16-Z23 (tuples based at Z16_Z24).
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      // Four-register lists start in Z0-Z3 (tuples based at Z0_Z4_Z8_Z12)
      // or in Z16-Z19 (tuples based at Z16_Z20_Z24_Z28).
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1903
addMatrixTileListOperands(MCInst & Inst,unsigned N) const1904 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1905 assert(N == 1 && "Invalid number of operands!");
1906 unsigned RegMask = getMatrixTileListRegMask();
1907 assert(RegMask <= 0xFF && "Invalid mask!");
1908 Inst.addOperand(MCOperand::createImm(RegMask));
1909 }
1910
addVectorIndexOperands(MCInst & Inst,unsigned N) const1911 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1912 assert(N == 1 && "Invalid number of operands!");
1913 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1914 }
1915
  // Add the single encoded bit for an operand restricted to exactly two FP
  // immediates: 1 if the operand matched the ImmIs1 value, 0 otherwise.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    // Single-parameter isExactFPImm tests against just the ImmIs1 candidate.
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1922
addImmOperands(MCInst & Inst,unsigned N) const1923 void addImmOperands(MCInst &Inst, unsigned N) const {
1924 assert(N == 1 && "Invalid number of operands!");
1925 // If this is a pageoff symrefexpr with an addend, adjust the addend
1926 // to be only the page-offset portion. Otherwise, just add the expr
1927 // as-is.
1928 addExpr(Inst, getImm());
1929 }
1930
  // Add an immediate that may carry a left shift (e.g. "#1, lsl #12") as a
  // pair of operands: the value and the shift amount.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      // Constant representable as (value << Shift).
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      // Symbolic expression with an explicitly written shift.
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      // Plain symbolic expression; the shift defaults to 0.
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  // As above but the value is negated first (alias forms such as
  // "add x0, x1, #-imm" matching SUB). Only constants that fit after the
  // shift are expected to reach this point.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1955
addCondCodeOperands(MCInst & Inst,unsigned N) const1956 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1957 assert(N == 1 && "Invalid number of operands!");
1958 Inst.addOperand(MCOperand::createImm(getCondCode()));
1959 }
1960
addAdrpLabelOperands(MCInst & Inst,unsigned N) const1961 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1962 assert(N == 1 && "Invalid number of operands!");
1963 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1964 if (!MCE)
1965 addExpr(Inst, getImm());
1966 else
1967 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1968 }
1969
addAdrLabelOperands(MCInst & Inst,unsigned N) const1970 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1971 addImmOperands(Inst, N);
1972 }
1973
1974 template<int Scale>
addUImm12OffsetOperands(MCInst & Inst,unsigned N) const1975 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1976 assert(N == 1 && "Invalid number of operands!");
1977 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1978
1979 if (!MCE) {
1980 Inst.addOperand(MCOperand::createExpr(getImm()));
1981 return;
1982 }
1983 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1984 }
1985
addUImm6Operands(MCInst & Inst,unsigned N) const1986 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1987 assert(N == 1 && "Invalid number of operands!");
1988 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1989 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1990 }
1991
1992 template <int Scale>
addImmScaledOperands(MCInst & Inst,unsigned N) const1993 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1994 assert(N == 1 && "Invalid number of operands!");
1995 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1996 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1997 }
1998
1999 template <int Scale>
addImmScaledRangeOperands(MCInst & Inst,unsigned N) const2000 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2001 assert(N == 1 && "Invalid number of operands!");
2002 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2003 }
2004
2005 template <typename T>
addLogicalImmOperands(MCInst & Inst,unsigned N) const2006 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2007 assert(N == 1 && "Invalid number of operands!");
2008 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2009 std::make_unsigned_t<T> Val = MCE->getValue();
2010 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2011 Inst.addOperand(MCOperand::createImm(encoding));
2012 }
2013
2014 template <typename T>
addLogicalImmNotOperands(MCInst & Inst,unsigned N) const2015 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2016 assert(N == 1 && "Invalid number of operands!");
2017 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2018 std::make_unsigned_t<T> Val = ~MCE->getValue();
2019 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2020 Inst.addOperand(MCOperand::createImm(encoding));
2021 }
2022
addSIMDImmType10Operands(MCInst & Inst,unsigned N) const2023 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2024 assert(N == 1 && "Invalid number of operands!");
2025 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2026 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2027 Inst.addOperand(MCOperand::createImm(encoding));
2028 }
2029
addBranchTarget26Operands(MCInst & Inst,unsigned N) const2030 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2031 // Branch operands don't encode the low bits, so shift them off
2032 // here. If it's a label, however, just put it on directly as there's
2033 // not enough information now to do anything.
2034 assert(N == 1 && "Invalid number of operands!");
2035 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2036 if (!MCE) {
2037 addExpr(Inst, getImm());
2038 return;
2039 }
2040 assert(MCE && "Invalid constant immediate operand!");
2041 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2042 }
2043
addPAuthPCRelLabel16Operands(MCInst & Inst,unsigned N) const2044 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2045 // PC-relative operands don't encode the low bits, so shift them off
2046 // here. If it's a label, however, just put it on directly as there's
2047 // not enough information now to do anything.
2048 assert(N == 1 && "Invalid number of operands!");
2049 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2050 if (!MCE) {
2051 addExpr(Inst, getImm());
2052 return;
2053 }
2054 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2055 }
2056
addPCRelLabel19Operands(MCInst & Inst,unsigned N) const2057 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2058 // Branch operands don't encode the low bits, so shift them off
2059 // here. If it's a label, however, just put it on directly as there's
2060 // not enough information now to do anything.
2061 assert(N == 1 && "Invalid number of operands!");
2062 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2063 if (!MCE) {
2064 addExpr(Inst, getImm());
2065 return;
2066 }
2067 assert(MCE && "Invalid constant immediate operand!");
2068 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2069 }
2070
addBranchTarget14Operands(MCInst & Inst,unsigned N) const2071 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2072 // Branch operands don't encode the low bits, so shift them off
2073 // here. If it's a label, however, just put it on directly as there's
2074 // not enough information now to do anything.
2075 assert(N == 1 && "Invalid number of operands!");
2076 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2077 if (!MCE) {
2078 addExpr(Inst, getImm());
2079 return;
2080 }
2081 assert(MCE && "Invalid constant immediate operand!");
2082 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2083 }
2084
addFPImmOperands(MCInst & Inst,unsigned N) const2085 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2086 assert(N == 1 && "Invalid number of operands!");
2087 Inst.addOperand(MCOperand::createImm(
2088 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2089 }
2090
addBarrierOperands(MCInst & Inst,unsigned N) const2091 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2092 assert(N == 1 && "Invalid number of operands!");
2093 Inst.addOperand(MCOperand::createImm(getBarrier()));
2094 }
2095
addBarriernXSOperands(MCInst & Inst,unsigned N) const2096 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2097 assert(N == 1 && "Invalid number of operands!");
2098 Inst.addOperand(MCOperand::createImm(getBarrier()));
2099 }
2100
addMRSSystemRegisterOperands(MCInst & Inst,unsigned N) const2101 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2102 assert(N == 1 && "Invalid number of operands!");
2103
2104 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2105 }
2106
addMSRSystemRegisterOperands(MCInst & Inst,unsigned N) const2107 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2108 assert(N == 1 && "Invalid number of operands!");
2109
2110 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2111 }
2112
addSystemPStateFieldWithImm0_1Operands(MCInst & Inst,unsigned N) const2113 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2114 assert(N == 1 && "Invalid number of operands!");
2115
2116 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2117 }
2118
addSVCROperands(MCInst & Inst,unsigned N) const2119 void addSVCROperands(MCInst &Inst, unsigned N) const {
2120 assert(N == 1 && "Invalid number of operands!");
2121
2122 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2123 }
2124
addSystemPStateFieldWithImm0_15Operands(MCInst & Inst,unsigned N) const2125 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2126 assert(N == 1 && "Invalid number of operands!");
2127
2128 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2129 }
2130
addSysCROperands(MCInst & Inst,unsigned N) const2131 void addSysCROperands(MCInst &Inst, unsigned N) const {
2132 assert(N == 1 && "Invalid number of operands!");
2133 Inst.addOperand(MCOperand::createImm(getSysCR()));
2134 }
2135
addPrefetchOperands(MCInst & Inst,unsigned N) const2136 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2137 assert(N == 1 && "Invalid number of operands!");
2138 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2139 }
2140
addPSBHintOperands(MCInst & Inst,unsigned N) const2141 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2142 assert(N == 1 && "Invalid number of operands!");
2143 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2144 }
2145
addBTIHintOperands(MCInst & Inst,unsigned N) const2146 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2147 assert(N == 1 && "Invalid number of operands!");
2148 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2149 }
2150
addShifterOperands(MCInst & Inst,unsigned N) const2151 void addShifterOperands(MCInst &Inst, unsigned N) const {
2152 assert(N == 1 && "Invalid number of operands!");
2153 unsigned Imm =
2154 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2155 Inst.addOperand(MCOperand::createImm(Imm));
2156 }
2157
addLSLImm3ShifterOperands(MCInst & Inst,unsigned N) const2158 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2159 assert(N == 1 && "Invalid number of operands!");
2160 unsigned Imm = getShiftExtendAmount();
2161 Inst.addOperand(MCOperand::createImm(Imm));
2162 }
2163
  // Add the XZR operand of a SYSP register pair. Matching already restricts
  // the operand, so the checks below only guard against parser bugs.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // NOTE(review): silently adds no operand for a non-scalar register;
    // presumably earlier matching has already rejected that case — confirm.
    if (!isScalarReg())
      return;

    // Map whatever GPR was written (e.g. WZR) to its 64-bit counterpart.
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
                       .getRegister(RI->getEncodingValue(getReg()));
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  }
2178
addExtendOperands(MCInst & Inst,unsigned N) const2179 void addExtendOperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2182 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2183 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2184 Inst.addOperand(MCOperand::createImm(Imm));
2185 }
2186
addExtend64Operands(MCInst & Inst,unsigned N) const2187 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2188 assert(N == 1 && "Invalid number of operands!");
2189 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2190 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2191 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2192 Inst.addOperand(MCOperand::createImm(Imm));
2193 }
2194
addMemExtendOperands(MCInst & Inst,unsigned N) const2195 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2196 assert(N == 2 && "Invalid number of operands!");
2197 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2198 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2199 Inst.addOperand(MCOperand::createImm(IsSigned));
2200 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2201 }
2202
2203 // For 8-bit load/store instructions with a register offset, both the
2204 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2205 // they're disambiguated by whether the shift was explicit or implicit rather
2206 // than its size.
addMemExtend8Operands(MCInst & Inst,unsigned N) const2207 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2208 assert(N == 2 && "Invalid number of operands!");
2209 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2210 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2211 Inst.addOperand(MCOperand::createImm(IsSigned));
2212 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2213 }
2214
2215 template<int Shift>
addMOVZMovAliasOperands(MCInst & Inst,unsigned N) const2216 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2217 assert(N == 1 && "Invalid number of operands!");
2218
2219 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2220 if (CE) {
2221 uint64_t Value = CE->getValue();
2222 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2223 } else {
2224 addExpr(Inst, getImm());
2225 }
2226 }
2227
2228 template<int Shift>
addMOVNMovAliasOperands(MCInst & Inst,unsigned N) const2229 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2230 assert(N == 1 && "Invalid number of operands!");
2231
2232 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2233 uint64_t Value = CE->getValue();
2234 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2235 }
2236
addComplexRotationEvenOperands(MCInst & Inst,unsigned N) const2237 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2238 assert(N == 1 && "Invalid number of operands!");
2239 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2240 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2241 }
2242
addComplexRotationOddOperands(MCInst & Inst,unsigned N) const2243 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2244 assert(N == 1 && "Invalid number of operands!");
2245 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2246 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2247 }
2248
2249 void print(raw_ostream &OS) const override;
2250
2251 static std::unique_ptr<AArch64Operand>
CreateToken(StringRef Str,SMLoc S,MCContext & Ctx,bool IsSuffix=false)2252 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2253 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2254 Op->Tok.Data = Str.data();
2255 Op->Tok.Length = Str.size();
2256 Op->Tok.IsSuffix = IsSuffix;
2257 Op->StartLoc = S;
2258 Op->EndLoc = S;
2259 return Op;
2260 }
2261
2262 static std::unique_ptr<AArch64Operand>
CreateReg(unsigned RegNum,RegKind Kind,SMLoc S,SMLoc E,MCContext & Ctx,RegConstraintEqualityTy EqTy=RegConstraintEqualityTy::EqualsReg,AArch64_AM::ShiftExtendType ExtTy=AArch64_AM::LSL,unsigned ShiftAmount=0,unsigned HasExplicitAmount=false)2263 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2264 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2265 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2266 unsigned ShiftAmount = 0,
2267 unsigned HasExplicitAmount = false) {
2268 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2269 Op->Reg.RegNum = RegNum;
2270 Op->Reg.Kind = Kind;
2271 Op->Reg.ElementWidth = 0;
2272 Op->Reg.EqualityTy = EqTy;
2273 Op->Reg.ShiftExtend.Type = ExtTy;
2274 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2275 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2276 Op->StartLoc = S;
2277 Op->EndLoc = E;
2278 return Op;
2279 }
2280
  // Build a vector (NEON/SVE data/SVE predicate) register operand. Delegates
  // to CreateReg and then records the element width implied by the parsed
  // suffix (e.g. ".4s").
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2296
2297 static std::unique_ptr<AArch64Operand>
CreateVectorList(unsigned RegNum,unsigned Count,unsigned Stride,unsigned NumElements,unsigned ElementWidth,RegKind RegisterKind,SMLoc S,SMLoc E,MCContext & Ctx)2298 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2299 unsigned NumElements, unsigned ElementWidth,
2300 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2301 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2302 Op->VectorList.RegNum = RegNum;
2303 Op->VectorList.Count = Count;
2304 Op->VectorList.Stride = Stride;
2305 Op->VectorList.NumElements = NumElements;
2306 Op->VectorList.ElementWidth = ElementWidth;
2307 Op->VectorList.RegisterKind = RegisterKind;
2308 Op->StartLoc = S;
2309 Op->EndLoc = E;
2310 return Op;
2311 }
2312
2313 static std::unique_ptr<AArch64Operand>
CreateVectorIndex(int Idx,SMLoc S,SMLoc E,MCContext & Ctx)2314 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2315 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2316 Op->VectorIndex.Val = Idx;
2317 Op->StartLoc = S;
2318 Op->EndLoc = E;
2319 return Op;
2320 }
2321
2322 static std::unique_ptr<AArch64Operand>
CreateMatrixTileList(unsigned RegMask,SMLoc S,SMLoc E,MCContext & Ctx)2323 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2324 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2325 Op->MatrixTileList.RegMask = RegMask;
2326 Op->StartLoc = S;
2327 Op->EndLoc = E;
2328 return Op;
2329 }
2330
ComputeRegsForAlias(unsigned Reg,SmallSet<unsigned,8> & OutRegs,const unsigned ElementWidth)2331 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2332 const unsigned ElementWidth) {
2333 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2334 RegMap = {
2335 {{0, AArch64::ZAB0},
2336 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2337 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2338 {{8, AArch64::ZAB0},
2339 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2340 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2341 {{16, AArch64::ZAH0},
2342 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2343 {{16, AArch64::ZAH1},
2344 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2345 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2346 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2347 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2348 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2349 };
2350
2351 if (ElementWidth == 64)
2352 OutRegs.insert(Reg);
2353 else {
2354 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2355 assert(!Regs.empty() && "Invalid tile or element width!");
2356 for (auto OutReg : Regs)
2357 OutRegs.insert(OutReg);
2358 }
2359 }
2360
CreateImm(const MCExpr * Val,SMLoc S,SMLoc E,MCContext & Ctx)2361 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2362 SMLoc E, MCContext &Ctx) {
2363 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2364 Op->Imm.Val = Val;
2365 Op->StartLoc = S;
2366 Op->EndLoc = E;
2367 return Op;
2368 }
2369
CreateShiftedImm(const MCExpr * Val,unsigned ShiftAmount,SMLoc S,SMLoc E,MCContext & Ctx)2370 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2371 unsigned ShiftAmount,
2372 SMLoc S, SMLoc E,
2373 MCContext &Ctx) {
2374 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2375 Op->ShiftedImm .Val = Val;
2376 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2377 Op->StartLoc = S;
2378 Op->EndLoc = E;
2379 return Op;
2380 }
2381
CreateImmRange(unsigned First,unsigned Last,SMLoc S,SMLoc E,MCContext & Ctx)2382 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2383 unsigned Last, SMLoc S,
2384 SMLoc E,
2385 MCContext &Ctx) {
2386 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2387 Op->ImmRange.First = First;
2388 Op->ImmRange.Last = Last;
2389 Op->EndLoc = E;
2390 return Op;
2391 }
2392
2393 static std::unique_ptr<AArch64Operand>
CreateCondCode(AArch64CC::CondCode Code,SMLoc S,SMLoc E,MCContext & Ctx)2394 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2395 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2396 Op->CondCode.Code = Code;
2397 Op->StartLoc = S;
2398 Op->EndLoc = E;
2399 return Op;
2400 }
2401
2402 static std::unique_ptr<AArch64Operand>
CreateFPImm(APFloat Val,bool IsExact,SMLoc S,MCContext & Ctx)2403 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2404 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2405 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2406 Op->FPImm.IsExact = IsExact;
2407 Op->StartLoc = S;
2408 Op->EndLoc = S;
2409 return Op;
2410 }
2411
CreateBarrier(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx,bool HasnXSModifier)2412 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2413 StringRef Str,
2414 SMLoc S,
2415 MCContext &Ctx,
2416 bool HasnXSModifier) {
2417 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2418 Op->Barrier.Val = Val;
2419 Op->Barrier.Data = Str.data();
2420 Op->Barrier.Length = Str.size();
2421 Op->Barrier.HasnXSModifier = HasnXSModifier;
2422 Op->StartLoc = S;
2423 Op->EndLoc = S;
2424 return Op;
2425 }
2426
CreateSysReg(StringRef Str,SMLoc S,uint32_t MRSReg,uint32_t MSRReg,uint32_t PStateField,MCContext & Ctx)2427 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2428 uint32_t MRSReg,
2429 uint32_t MSRReg,
2430 uint32_t PStateField,
2431 MCContext &Ctx) {
2432 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2433 Op->SysReg.Data = Str.data();
2434 Op->SysReg.Length = Str.size();
2435 Op->SysReg.MRSReg = MRSReg;
2436 Op->SysReg.MSRReg = MSRReg;
2437 Op->SysReg.PStateField = PStateField;
2438 Op->StartLoc = S;
2439 Op->EndLoc = S;
2440 return Op;
2441 }
2442
CreateSysCR(unsigned Val,SMLoc S,SMLoc E,MCContext & Ctx)2443 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2444 SMLoc E, MCContext &Ctx) {
2445 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2446 Op->SysCRImm.Val = Val;
2447 Op->StartLoc = S;
2448 Op->EndLoc = E;
2449 return Op;
2450 }
2451
CreatePrefetch(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)2452 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2453 StringRef Str,
2454 SMLoc S,
2455 MCContext &Ctx) {
2456 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2457 Op->Prefetch.Val = Val;
2458 Op->Barrier.Data = Str.data();
2459 Op->Barrier.Length = Str.size();
2460 Op->StartLoc = S;
2461 Op->EndLoc = S;
2462 return Op;
2463 }
2464
CreatePSBHint(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)2465 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2466 StringRef Str,
2467 SMLoc S,
2468 MCContext &Ctx) {
2469 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2470 Op->PSBHint.Val = Val;
2471 Op->PSBHint.Data = Str.data();
2472 Op->PSBHint.Length = Str.size();
2473 Op->StartLoc = S;
2474 Op->EndLoc = S;
2475 return Op;
2476 }
2477
CreateBTIHint(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)2478 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2479 StringRef Str,
2480 SMLoc S,
2481 MCContext &Ctx) {
2482 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2483 Op->BTIHint.Val = Val | 32;
2484 Op->BTIHint.Data = Str.data();
2485 Op->BTIHint.Length = Str.size();
2486 Op->StartLoc = S;
2487 Op->EndLoc = S;
2488 return Op;
2489 }
2490
2491 static std::unique_ptr<AArch64Operand>
CreateMatrixRegister(unsigned RegNum,unsigned ElementWidth,MatrixKind Kind,SMLoc S,SMLoc E,MCContext & Ctx)2492 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2493 SMLoc S, SMLoc E, MCContext &Ctx) {
2494 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2495 Op->MatrixReg.RegNum = RegNum;
2496 Op->MatrixReg.ElementWidth = ElementWidth;
2497 Op->MatrixReg.Kind = Kind;
2498 Op->StartLoc = S;
2499 Op->EndLoc = E;
2500 return Op;
2501 }
2502
2503 static std::unique_ptr<AArch64Operand>
CreateSVCR(uint32_t PStateField,StringRef Str,SMLoc S,MCContext & Ctx)2504 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2505 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2506 Op->SVCR.PStateField = PStateField;
2507 Op->SVCR.Data = Str.data();
2508 Op->SVCR.Length = Str.size();
2509 Op->StartLoc = S;
2510 Op->EndLoc = S;
2511 return Op;
2512 }
2513
2514 static std::unique_ptr<AArch64Operand>
CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp,unsigned Val,bool HasExplicitAmount,SMLoc S,SMLoc E,MCContext & Ctx)2515 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2516 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2517 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2518 Op->ShiftExtend.Type = ShOp;
2519 Op->ShiftExtend.Amount = Val;
2520 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2521 Op->StartLoc = S;
2522 Op->EndLoc = E;
2523 return Op;
2524 }
2525 };
2526
2527 } // end anonymous namespace.
2528
// Debug dump of an operand; format mirrors the operand kind and is only
// intended for diagnostics, not for reparsing.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    // Lists print each member register number, honouring the stride.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    // Print the 8-bit tile mask MSB-first as a bit string.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    // A register with an attached shift/extend also prints that suffix.
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>"; // amount was implicit, not written in the assembly
    OS << '>';
    break;
  }
}
2628
2629 /// @name Auto-generated Match Functions
2630 /// {
2631
2632 static MCRegister MatchRegisterName(StringRef Name);
2633
2634 /// }
2635
MatchNeonVectorRegName(StringRef Name)2636 static unsigned MatchNeonVectorRegName(StringRef Name) {
2637 return StringSwitch<unsigned>(Name.lower())
2638 .Case("v0", AArch64::Q0)
2639 .Case("v1", AArch64::Q1)
2640 .Case("v2", AArch64::Q2)
2641 .Case("v3", AArch64::Q3)
2642 .Case("v4", AArch64::Q4)
2643 .Case("v5", AArch64::Q5)
2644 .Case("v6", AArch64::Q6)
2645 .Case("v7", AArch64::Q7)
2646 .Case("v8", AArch64::Q8)
2647 .Case("v9", AArch64::Q9)
2648 .Case("v10", AArch64::Q10)
2649 .Case("v11", AArch64::Q11)
2650 .Case("v12", AArch64::Q12)
2651 .Case("v13", AArch64::Q13)
2652 .Case("v14", AArch64::Q14)
2653 .Case("v15", AArch64::Q15)
2654 .Case("v16", AArch64::Q16)
2655 .Case("v17", AArch64::Q17)
2656 .Case("v18", AArch64::Q18)
2657 .Case("v19", AArch64::Q19)
2658 .Case("v20", AArch64::Q20)
2659 .Case("v21", AArch64::Q21)
2660 .Case("v22", AArch64::Q22)
2661 .Case("v23", AArch64::Q23)
2662 .Case("v24", AArch64::Q24)
2663 .Case("v25", AArch64::Q25)
2664 .Case("v26", AArch64::Q26)
2665 .Case("v27", AArch64::Q27)
2666 .Case("v28", AArch64::Q28)
2667 .Case("v29", AArch64::Q29)
2668 .Case("v30", AArch64::Q30)
2669 .Case("v31", AArch64::Q31)
2670 .Default(0);
2671 }
2672
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                          RegKind VectorKind) {
  // {-1, -1} is the "invalid suffix" sentinel checked at the end.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".1d", {1, 64})
              .Case(".1q", {1, 128})
              // '.2h' needed for fp16 scalar pairwise reductions
              .Case(".2h", {2, 16})
              .Case(".2b", {2, 8})
              .Case(".2s", {2, 32})
              .Case(".2d", {2, 64})
              // '.4b' is another special case for the ARMv8.2a dot product
              // operand
              .Case(".4b", {4, 8})
              .Case(".4h", {4, 16})
              .Case(".4s", {4, 32})
              .Case(".8b", {8, 8})
              .Case(".8h", {8, 16})
              .Case(".16b", {16, 8})
              // Accept the width neutral ones, too, for verbose syntax. If
              // those aren't used in the right places, the token operand won't
              // match so all will work out.
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Default({-1, -1});
    break;
  case RegKind::SVEPredicateAsCounter:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // SVE/SME suffixes only encode an element width, never a count.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return std::nullopt;

  return std::optional<std::pair<int, int>>(Res);
}
2731
isValidVectorKind(StringRef Suffix,RegKind VectorKind)2732 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2733 return parseVectorKind(Suffix, VectorKind).has_value();
2734 }
2735
matchSVEDataVectorRegName(StringRef Name)2736 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2737 return StringSwitch<unsigned>(Name.lower())
2738 .Case("z0", AArch64::Z0)
2739 .Case("z1", AArch64::Z1)
2740 .Case("z2", AArch64::Z2)
2741 .Case("z3", AArch64::Z3)
2742 .Case("z4", AArch64::Z4)
2743 .Case("z5", AArch64::Z5)
2744 .Case("z6", AArch64::Z6)
2745 .Case("z7", AArch64::Z7)
2746 .Case("z8", AArch64::Z8)
2747 .Case("z9", AArch64::Z9)
2748 .Case("z10", AArch64::Z10)
2749 .Case("z11", AArch64::Z11)
2750 .Case("z12", AArch64::Z12)
2751 .Case("z13", AArch64::Z13)
2752 .Case("z14", AArch64::Z14)
2753 .Case("z15", AArch64::Z15)
2754 .Case("z16", AArch64::Z16)
2755 .Case("z17", AArch64::Z17)
2756 .Case("z18", AArch64::Z18)
2757 .Case("z19", AArch64::Z19)
2758 .Case("z20", AArch64::Z20)
2759 .Case("z21", AArch64::Z21)
2760 .Case("z22", AArch64::Z22)
2761 .Case("z23", AArch64::Z23)
2762 .Case("z24", AArch64::Z24)
2763 .Case("z25", AArch64::Z25)
2764 .Case("z26", AArch64::Z26)
2765 .Case("z27", AArch64::Z27)
2766 .Case("z28", AArch64::Z28)
2767 .Case("z29", AArch64::Z29)
2768 .Case("z30", AArch64::Z30)
2769 .Case("z31", AArch64::Z31)
2770 .Default(0);
2771 }
2772
matchSVEPredicateVectorRegName(StringRef Name)2773 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2774 return StringSwitch<unsigned>(Name.lower())
2775 .Case("p0", AArch64::P0)
2776 .Case("p1", AArch64::P1)
2777 .Case("p2", AArch64::P2)
2778 .Case("p3", AArch64::P3)
2779 .Case("p4", AArch64::P4)
2780 .Case("p5", AArch64::P5)
2781 .Case("p6", AArch64::P6)
2782 .Case("p7", AArch64::P7)
2783 .Case("p8", AArch64::P8)
2784 .Case("p9", AArch64::P9)
2785 .Case("p10", AArch64::P10)
2786 .Case("p11", AArch64::P11)
2787 .Case("p12", AArch64::P12)
2788 .Case("p13", AArch64::P13)
2789 .Case("p14", AArch64::P14)
2790 .Case("p15", AArch64::P15)
2791 .Default(0);
2792 }
2793
matchSVEPredicateAsCounterRegName(StringRef Name)2794 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2795 return StringSwitch<unsigned>(Name.lower())
2796 .Case("pn0", AArch64::PN0)
2797 .Case("pn1", AArch64::PN1)
2798 .Case("pn2", AArch64::PN2)
2799 .Case("pn3", AArch64::PN3)
2800 .Case("pn4", AArch64::PN4)
2801 .Case("pn5", AArch64::PN5)
2802 .Case("pn6", AArch64::PN6)
2803 .Case("pn7", AArch64::PN7)
2804 .Case("pn8", AArch64::PN8)
2805 .Case("pn9", AArch64::PN9)
2806 .Case("pn10", AArch64::PN10)
2807 .Case("pn11", AArch64::PN11)
2808 .Case("pn12", AArch64::PN12)
2809 .Case("pn13", AArch64::PN13)
2810 .Case("pn14", AArch64::PN14)
2811 .Case("pn15", AArch64::PN15)
2812 .Default(0);
2813 }
2814
// Map a whole SME tile name ("zaN.<size>", case-insensitive) to its tile
// register enum value for use in matrix tile-list operands, or 0 if the name
// does not match. Only whole-tile spellings are accepted here; the row/column
// slice forms ("zaNh.*"/"zaNv.*") are handled by matchMatrixRegName. The
// number of tiles per element size halves as the element width doubles
// (8 x .d, 4 x .s, 2 x .h, 1 x .b).
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2834
matchMatrixRegName(StringRef Name)2835 static unsigned matchMatrixRegName(StringRef Name) {
2836 return StringSwitch<unsigned>(Name.lower())
2837 .Case("za", AArch64::ZA)
2838 .Case("za0.q", AArch64::ZAQ0)
2839 .Case("za1.q", AArch64::ZAQ1)
2840 .Case("za2.q", AArch64::ZAQ2)
2841 .Case("za3.q", AArch64::ZAQ3)
2842 .Case("za4.q", AArch64::ZAQ4)
2843 .Case("za5.q", AArch64::ZAQ5)
2844 .Case("za6.q", AArch64::ZAQ6)
2845 .Case("za7.q", AArch64::ZAQ7)
2846 .Case("za8.q", AArch64::ZAQ8)
2847 .Case("za9.q", AArch64::ZAQ9)
2848 .Case("za10.q", AArch64::ZAQ10)
2849 .Case("za11.q", AArch64::ZAQ11)
2850 .Case("za12.q", AArch64::ZAQ12)
2851 .Case("za13.q", AArch64::ZAQ13)
2852 .Case("za14.q", AArch64::ZAQ14)
2853 .Case("za15.q", AArch64::ZAQ15)
2854 .Case("za0.d", AArch64::ZAD0)
2855 .Case("za1.d", AArch64::ZAD1)
2856 .Case("za2.d", AArch64::ZAD2)
2857 .Case("za3.d", AArch64::ZAD3)
2858 .Case("za4.d", AArch64::ZAD4)
2859 .Case("za5.d", AArch64::ZAD5)
2860 .Case("za6.d", AArch64::ZAD6)
2861 .Case("za7.d", AArch64::ZAD7)
2862 .Case("za0.s", AArch64::ZAS0)
2863 .Case("za1.s", AArch64::ZAS1)
2864 .Case("za2.s", AArch64::ZAS2)
2865 .Case("za3.s", AArch64::ZAS3)
2866 .Case("za0.h", AArch64::ZAH0)
2867 .Case("za1.h", AArch64::ZAH1)
2868 .Case("za0.b", AArch64::ZAB0)
2869 .Case("za0h.q", AArch64::ZAQ0)
2870 .Case("za1h.q", AArch64::ZAQ1)
2871 .Case("za2h.q", AArch64::ZAQ2)
2872 .Case("za3h.q", AArch64::ZAQ3)
2873 .Case("za4h.q", AArch64::ZAQ4)
2874 .Case("za5h.q", AArch64::ZAQ5)
2875 .Case("za6h.q", AArch64::ZAQ6)
2876 .Case("za7h.q", AArch64::ZAQ7)
2877 .Case("za8h.q", AArch64::ZAQ8)
2878 .Case("za9h.q", AArch64::ZAQ9)
2879 .Case("za10h.q", AArch64::ZAQ10)
2880 .Case("za11h.q", AArch64::ZAQ11)
2881 .Case("za12h.q", AArch64::ZAQ12)
2882 .Case("za13h.q", AArch64::ZAQ13)
2883 .Case("za14h.q", AArch64::ZAQ14)
2884 .Case("za15h.q", AArch64::ZAQ15)
2885 .Case("za0h.d", AArch64::ZAD0)
2886 .Case("za1h.d", AArch64::ZAD1)
2887 .Case("za2h.d", AArch64::ZAD2)
2888 .Case("za3h.d", AArch64::ZAD3)
2889 .Case("za4h.d", AArch64::ZAD4)
2890 .Case("za5h.d", AArch64::ZAD5)
2891 .Case("za6h.d", AArch64::ZAD6)
2892 .Case("za7h.d", AArch64::ZAD7)
2893 .Case("za0h.s", AArch64::ZAS0)
2894 .Case("za1h.s", AArch64::ZAS1)
2895 .Case("za2h.s", AArch64::ZAS2)
2896 .Case("za3h.s", AArch64::ZAS3)
2897 .Case("za0h.h", AArch64::ZAH0)
2898 .Case("za1h.h", AArch64::ZAH1)
2899 .Case("za0h.b", AArch64::ZAB0)
2900 .Case("za0v.q", AArch64::ZAQ0)
2901 .Case("za1v.q", AArch64::ZAQ1)
2902 .Case("za2v.q", AArch64::ZAQ2)
2903 .Case("za3v.q", AArch64::ZAQ3)
2904 .Case("za4v.q", AArch64::ZAQ4)
2905 .Case("za5v.q", AArch64::ZAQ5)
2906 .Case("za6v.q", AArch64::ZAQ6)
2907 .Case("za7v.q", AArch64::ZAQ7)
2908 .Case("za8v.q", AArch64::ZAQ8)
2909 .Case("za9v.q", AArch64::ZAQ9)
2910 .Case("za10v.q", AArch64::ZAQ10)
2911 .Case("za11v.q", AArch64::ZAQ11)
2912 .Case("za12v.q", AArch64::ZAQ12)
2913 .Case("za13v.q", AArch64::ZAQ13)
2914 .Case("za14v.q", AArch64::ZAQ14)
2915 .Case("za15v.q", AArch64::ZAQ15)
2916 .Case("za0v.d", AArch64::ZAD0)
2917 .Case("za1v.d", AArch64::ZAD1)
2918 .Case("za2v.d", AArch64::ZAD2)
2919 .Case("za3v.d", AArch64::ZAD3)
2920 .Case("za4v.d", AArch64::ZAD4)
2921 .Case("za5v.d", AArch64::ZAD5)
2922 .Case("za6v.d", AArch64::ZAD6)
2923 .Case("za7v.d", AArch64::ZAD7)
2924 .Case("za0v.s", AArch64::ZAS0)
2925 .Case("za1v.s", AArch64::ZAS1)
2926 .Case("za2v.s", AArch64::ZAS2)
2927 .Case("za3v.s", AArch64::ZAS3)
2928 .Case("za0v.h", AArch64::ZAH0)
2929 .Case("za1v.h", AArch64::ZAH1)
2930 .Case("za0v.b", AArch64::ZAB0)
2931 .Default(0);
2932 }
2933
parseRegister(MCRegister & Reg,SMLoc & StartLoc,SMLoc & EndLoc)2934 bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2935 SMLoc &EndLoc) {
2936 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2937 }
2938
tryParseRegister(MCRegister & Reg,SMLoc & StartLoc,SMLoc & EndLoc)2939 ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2940 SMLoc &EndLoc) {
2941 StartLoc = getLoc();
2942 ParseStatus Res = tryParseScalarRegister(Reg);
2943 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2944 return Res;
2945 }
2946
// Matches a register name or register alias previously defined by '.req'.
// Each register-class matcher is tried in turn; a name that belongs to a
// class other than the requested Kind returns 0 (no match) rather than the
// register, so e.g. "z3" never matches when a scalar register is expected.
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
                                                  RegKind Kind) {
  unsigned RegNum = 0;
  if ((RegNum = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? RegNum : 0;

  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? RegNum : 0;

  if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
    return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;

  if ((RegNum = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? RegNum : 0;

  if ((RegNum = matchMatrixRegName(Name)))
    return Kind == RegKind::Matrix ? RegNum : 0;

  if (Name.equals_insensitive("zt0"))
    return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;

  // The parsed register must be of RegKind Scalar
  if ((RegNum = MatchRegisterName(Name)))
    return (Kind == RegKind::Scalar) ? RegNum : 0;

  // RegNum is necessarily still 0 here (every non-zero match above
  // returned), so this always executes when reached.
  if (!RegNum) {
    // Handle a few common aliases of registers.
    if (auto RegNum = StringSwitch<unsigned>(Name.lower())
                    .Case("fp", AArch64::FP)
                    .Case("lr", AArch64::LR)
                    .Case("x31", AArch64::XZR)
                    .Case("w31", AArch64::WZR)
                    .Default(0))
      return Kind == RegKind::Scalar ? RegNum : 0;

    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    auto Entry = RegisterReqs.find(Name.lower());
    if (Entry == RegisterReqs.end())
      return 0;

    // set RegNum if the match is the right kind of register
    if (Kind == Entry->getValue().first)
      RegNum = Entry->getValue().second;
  }
  return RegNum;
}
2996
getNumRegsForRegKind(RegKind K)2997 unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2998 switch (K) {
2999 case RegKind::Scalar:
3000 case RegKind::NeonVector:
3001 case RegKind::SVEDataVector:
3002 return 32;
3003 case RegKind::Matrix:
3004 case RegKind::SVEPredicateVector:
3005 case RegKind::SVEPredicateAsCounter:
3006 return 16;
3007 case RegKind::LookupTable:
3008 return 1;
3009 }
3010 llvm_unreachable("Unsupported RegKind");
3011 }
3012
3013 /// tryParseScalarRegister - Try to parse a register name. The token must be an
3014 /// Identifier when called, and if it is a register name the token is eaten and
3015 /// the register is added to the operand list.
tryParseScalarRegister(MCRegister & RegNum)3016 ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3017 const AsmToken &Tok = getTok();
3018 if (Tok.isNot(AsmToken::Identifier))
3019 return ParseStatus::NoMatch;
3020
3021 std::string lowerCase = Tok.getString().lower();
3022 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3023 if (Reg == 0)
3024 return ParseStatus::NoMatch;
3025
3026 RegNum = Reg;
3027 Lex(); // Eat identifier token.
3028 return ParseStatus::Success;
3029 }
3030
3031 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
tryParseSysCROperand(OperandVector & Operands)3032 ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3033 SMLoc S = getLoc();
3034
3035 if (getTok().isNot(AsmToken::Identifier))
3036 return Error(S, "Expected cN operand where 0 <= N <= 15");
3037
3038 StringRef Tok = getTok().getIdentifier();
3039 if (Tok[0] != 'c' && Tok[0] != 'C')
3040 return Error(S, "Expected cN operand where 0 <= N <= 15");
3041
3042 uint32_t CRNum;
3043 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3044 if (BadNum || CRNum > 15)
3045 return Error(S, "Expected cN operand where 0 <= N <= 15");
3046
3047 Lex(); // Eat identifier token.
3048 Operands.push_back(
3049 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3050 return ParseStatus::Success;
3051 }
3052
3053 // Either an identifier for named values or a 6-bit immediate.
tryParseRPRFMOperand(OperandVector & Operands)3054 ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3055 SMLoc S = getLoc();
3056 const AsmToken &Tok = getTok();
3057
3058 unsigned MaxVal = 63;
3059
3060 // Immediate case, with optional leading hash:
3061 if (parseOptionalToken(AsmToken::Hash) ||
3062 Tok.is(AsmToken::Integer)) {
3063 const MCExpr *ImmVal;
3064 if (getParser().parseExpression(ImmVal))
3065 return ParseStatus::Failure;
3066
3067 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3068 if (!MCE)
3069 return TokError("immediate value expected for prefetch operand");
3070 unsigned prfop = MCE->getValue();
3071 if (prfop > MaxVal)
3072 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3073 "] expected");
3074
3075 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3076 Operands.push_back(AArch64Operand::CreatePrefetch(
3077 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3078 return ParseStatus::Success;
3079 }
3080
3081 if (Tok.isNot(AsmToken::Identifier))
3082 return TokError("prefetch hint expected");
3083
3084 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3085 if (!RPRFM)
3086 return TokError("prefetch hint expected");
3087
3088 Operands.push_back(AArch64Operand::CreatePrefetch(
3089 RPRFM->Encoding, Tok.getString(), S, getContext()));
3090 Lex(); // Eat identifier token.
3091 return ParseStatus::Success;
3092 }
3093
/// tryParsePrefetch - Try to parse a prefetch operand: either a named hint or
/// an immediate, optionally preceded by '#'. The template parameter selects
/// between the base PRFM and the SVE prefetch hint tables at compile time.
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Name -> encoding lookup in the table matching IsSVEPrefetch.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  // Encoding -> canonical name lookup, used to label immediate operands.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  // SVE prefetch encodings are 4 bits, base prefetch encodings are 5 bits.
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash. Note parseOptionalToken consumes the '#' token.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
                      "] expected");

    // Attach the canonical name when the encoding has one, for printing.
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
                                                      S, getContext()));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError("prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3153
3154 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
tryParsePSBHint(OperandVector & Operands)3155 ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3156 SMLoc S = getLoc();
3157 const AsmToken &Tok = getTok();
3158 if (Tok.isNot(AsmToken::Identifier))
3159 return TokError("invalid operand for instruction");
3160
3161 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3162 if (!PSB)
3163 return TokError("invalid operand for instruction");
3164
3165 Operands.push_back(AArch64Operand::CreatePSBHint(
3166 PSB->Encoding, Tok.getString(), S, getContext()));
3167 Lex(); // Eat identifier token.
3168 return ParseStatus::Success;
3169 }
3170
// Parse the explicit "xzr, xzr" register-pair operand of SYSP. The first
// register is parsed speculatively: if it is a scalar register other than
// xzr, the token is pushed back and NoMatch is returned so other operand
// forms can be tried.
ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;

  // The case where xzr, xzr is not present is handled by an InstAlias.

  auto RegTok = getTok(); // in case we need to backtrack
  if (!tryParseScalarRegister(RegNum).isSuccess())
    return ParseStatus::NoMatch;

  if (RegNum != AArch64::XZR) {
    // Not our operand; un-lex the register token so another parser sees it.
    getLexer().UnLex(RegTok);
    return ParseStatus::NoMatch;
  }

  if (parseComma())
    return ParseStatus::Failure;

  // Past the comma we are committed: anything other than a second xzr is a
  // hard error rather than a NoMatch.
  if (!tryParseScalarRegister(RegNum).isSuccess())
    return TokError("expected register operand");

  if (RegNum != AArch64::XZR)
    return TokError("xzr must be followed by xzr");

  // We need to push something, since we claim this is an operand in .td.
  // See also AArch64AsmParser::parseKeywordOperand.
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));

  return ParseStatus::Success;
}
3203
3204 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
tryParseBTIHint(OperandVector & Operands)3205 ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3206 SMLoc S = getLoc();
3207 const AsmToken &Tok = getTok();
3208 if (Tok.isNot(AsmToken::Identifier))
3209 return TokError("invalid operand for instruction");
3210
3211 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3212 if (!BTI)
3213 return TokError("invalid operand for instruction");
3214
3215 Operands.push_back(AArch64Operand::CreateBTIHint(
3216 BTI->Encoding, Tok.getString(), S, getContext()));
3217 Lex(); // Eat identifier token.
3218 return ParseStatus::Success;
3219 }
3220
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction. Accepts a plain symbol (implicitly :abs_page:), or a symbol
/// qualified with one of the page-granular relocation modifiers; anything
/// else is rejected.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  // Validate any relocation modifier attached to the expression.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references cannot carry an addend.
      return Error(S, "gotpage label reference not allowed an addend");
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(S, "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return ParseStatus::Success;
}
3269
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction. Unlike ADRP, no relocation modifiers are permitted; a plain
/// symbol becomes an implicit :abs: reference.
ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getTok().is(AsmToken::LBrac))
    return ParseStatus::NoMatch;

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      // Any explicit modifier is invalid on ADR.
      return Error(S, "unexpected adr label");
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return ParseStatus::Success;
}
3304
/// tryParseFPImm - A floating point immediate expression operand. Accepts an
/// optional leading '#' and '-', then either a hexadecimal FP8 encoding
/// ("0x..") or a decimal floating-point literal. When AddFPZeroAsLiteral is
/// set, +0.0 is pushed as the two tokens "#0" ".0" instead of an FP operand.
template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  // Note parseOptionalToken consumes the '#'/'-' tokens when present.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a '#' this may legitimately be some other operand kind; with
    // one, it can only be a malformed FP immediate.
    if (!Hash)
      return ParseStatus::NoMatch;
    return TokError("invalid floating point immediate");
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
    // The hex form is the 8-bit encoded FP immediate; negation makes no
    // sense on an already-encoded value.
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError("encoded floating point value out of range");

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError()))
      return TokError("invalid floating point representation");

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      // Record whether the literal was representable exactly; some matchers
      // reject inexact immediates.
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Lex(); // Eat the token.

  return ParseStatus::Success;
}
3353
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'. Also dispatches to immediate
/// ranges ('#1:3') and vector-group suffixes ('#1, vgx2').
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // '<int>:<int>' is an immediate range, handled separately.
  if (getTok().is(AsmToken::Integer) &&
      getLexer().peekTok().is(AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(AsmToken::Comma)) {
    // Bare immediate with no suffix.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  // '#imm, vgx<N>' — immediate followed by a vector-group specifier.
  StringRef VecGroup;
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    Operands.push_back(
        AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl"))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(getLoc(), "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return ParseStatus::Success;
}
3420
/// parseCondCodeString - Parse a Condition Code string, optionally returning a
/// suggestion to help common typos. Returns AArch64CC::Invalid when Cond is
/// not a recognized condition name.
AArch64CC::CondCode
AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
                     .Case("eq", AArch64CC::EQ)
                     .Case("ne", AArch64CC::NE)
                     .Case("cs", AArch64CC::HS)
                     .Case("hs", AArch64CC::HS)
                     .Case("cc", AArch64CC::LO)
                     .Case("lo", AArch64CC::LO)
                     .Case("mi", AArch64CC::MI)
                     .Case("pl", AArch64CC::PL)
                     .Case("vs", AArch64CC::VS)
                     .Case("vc", AArch64CC::VC)
                     .Case("hi", AArch64CC::HI)
                     .Case("ls", AArch64CC::LS)
                     .Case("ge", AArch64CC::GE)
                     .Case("lt", AArch64CC::LT)
                     .Case("gt", AArch64CC::GT)
                     .Case("le", AArch64CC::LE)
                     .Case("al", AArch64CC::AL)
                     .Case("nv", AArch64CC::NV)
                     .Default(AArch64CC::Invalid);

  // With SVE enabled, also accept the SVE condition aliases, which map onto
  // the base condition codes.
  if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
    CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
             .Case("none", AArch64CC::EQ)
             .Case("any", AArch64CC::NE)
             .Case("nlast", AArch64CC::HS)
             .Case("last", AArch64CC::LO)
             .Case("first", AArch64CC::MI)
             .Case("nfrst", AArch64CC::PL)
             .Case("pmore", AArch64CC::HI)
             .Case("plast", AArch64CC::LS)
             .Case("tcont", AArch64CC::GE)
             .Case("tstop", AArch64CC::LT)
             .Default(AArch64CC::Invalid);

    // The alias is spelled "nfrst"; help users who type the natural
    // "nfirst".
    if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
      Suggestion = "nfrst";
  }
  return CC;
}
3465
3466 /// parseCondCode - Parse a Condition Code operand.
parseCondCode(OperandVector & Operands,bool invertCondCode)3467 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3468 bool invertCondCode) {
3469 SMLoc S = getLoc();
3470 const AsmToken &Tok = getTok();
3471 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3472
3473 StringRef Cond = Tok.getString();
3474 std::string Suggestion;
3475 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3476 if (CC == AArch64CC::Invalid) {
3477 std::string Msg = "invalid condition code";
3478 if (!Suggestion.empty())
3479 Msg += ", did you mean " + Suggestion + "?";
3480 return TokError(Msg);
3481 }
3482 Lex(); // Eat identifier token.
3483
3484 if (invertCondCode) {
3485 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3486 return TokError("condition codes AL and NV are invalid for this instruction");
3487 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3488 }
3489
3490 Operands.push_back(
3491 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3492 return false;
3493 }
3494
tryParseSVCR(OperandVector & Operands)3495 ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3496 const AsmToken &Tok = getTok();
3497 SMLoc S = getLoc();
3498
3499 if (Tok.isNot(AsmToken::Identifier))
3500 return TokError("invalid operand for instruction");
3501
3502 unsigned PStateImm = -1;
3503 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3504 if (!SVCR)
3505 return ParseStatus::NoMatch;
3506 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3507 PStateImm = SVCR->Encoding;
3508
3509 Operands.push_back(
3510 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3511 Lex(); // Eat identifier token.
3512 return ParseStatus::Success;
3513 }
3514
/// tryParseMatrixRegister - Parse an SME matrix operand: either the whole ZA
/// array ("za" with an optional element-width suffix) or a named tile/row/col
/// register such as "za0.h" or a "za<n>h.<t>"/"za<n>v.<t>" slice.
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Case 1: the full ZA array, optionally with an element-width suffix.
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  // Matrix register names recognised above always carry a '.' suffix.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  // The last character before the '.' distinguishes a horizontal ("h") or
  // vertical ("v") slice from a plain tile name.
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3582
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  // Map the mnemonic onto a shift/extend kind; anything else is not ours.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex(); // Eat the shift/extend mnemonic.

  // The immediate may be introduced by an optional '#'.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // Shift operations (as opposed to extends) always take an amount.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError("expected #imm after shift specifier");
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier))
    return Error(E, "expected integer shift amount");

  // The amount must fold to a compile-time constant.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return ParseStatus::Failure;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE)
    return Error(E, "expected constant '#imm' after shift specifier");

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return ParseStatus::Success;
}
3649
// Maps an architecture-extension name (as accepted in assembler directives
// such as ".arch_extension") to the subtarget feature bits it enables.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"rasv2", {AArch64::FeatureRASv2}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"predres2", {AArch64::FeatureSPECRES2}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},
    {"b16b16", {AArch64::FeatureB16B16}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},
    {"sme2p1", {AArch64::FeatureSME2p1}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},
    {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},
    {"lse128", {AArch64::FeatureLSE128}},
    {"ite", {AArch64::FeatureITE}},
    {"cssc", {AArch64::FeatureCSSC}},
    {"rcpc3", {AArch64::FeatureRCPC3}},
    {"gcs", {AArch64::FeatureGCS}},
    {"bf16", {AArch64::FeatureBF16}},
    {"compnum", {AArch64::FeatureComplxNum}},
    {"dotprod", {AArch64::FeatureDotProd}},
    {"f32mm", {AArch64::FeatureMatMulFP32}},
    {"f64mm", {AArch64::FeatureMatMulFP64}},
    {"fp16", {AArch64::FeatureFullFP16}},
    {"fp16fml", {AArch64::FeatureFP16FML}},
    {"i8mm", {AArch64::FeatureMatMulInt8}},
    {"lor", {AArch64::FeatureLOR}},
    {"profile", {AArch64::FeatureSPE}},
    // "rdma" is the name documented by binutils for the feature, but
    // binutils also accepts incomplete prefixes of features, so "rdm"
    // works too. Support both spellings here.
    {"rdm", {AArch64::FeatureRDM}},
    {"rdma", {AArch64::FeatureRDM}},
    {"sb", {AArch64::FeatureSB}},
    {"ssbs", {AArch64::FeatureSSBS}},
    {"tme", {AArch64::FeatureTME}},
    {"fp8", {AArch64::FeatureFP8}},
    {"faminmax", {AArch64::FeatureFAMINMAX}},
    {"fp8fma", {AArch64::FeatureFP8FMA}},
    {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
    {"fp8dot2", {AArch64::FeatureFP8DOT2}},
    {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
    {"fp8dot4", {AArch64::FeatureFP8DOT4}},
    {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
    {"lut", {AArch64::FeatureLUT}},
    {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
    {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
    {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
    {"sme-fa64", {AArch64::FeatureSMEFA64}},
    {"cpa", {AArch64::FeatureCPA}},
    {"tlbiw", {AArch64::FeatureTLBIW}},
};
3739
// Appends a human-readable description of the features in FBS to Str, for
// use in "X requires: ..." diagnostics: either an architecture-version name
// or, failing that, a comma-separated list of matching extension names.
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  // NOTE(review): this first check is a plain `if`, not part of the
  // `else if` chain below, so "ARMv8a" can be emitted in addition to a
  // later version string or the extension list — confirm this is intended.
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV8_9aOps])
    Str += "ARMv8.9a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV9_4aOps])
    Str += "ARMv9.4a";
  else if (FBS[AArch64::HasV9_5aOps])
    Str += "ARMv9.5a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    // No architecture-version bit matched: list every extension whose
    // feature set intersects FBS instead.
    SmallVector<std::string, 2> ExtMatches;
    for (const auto& Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
  }
}
3785
createSysAlias(uint16_t Encoding,OperandVector & Operands,SMLoc S)3786 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3787 SMLoc S) {
3788 const uint16_t Op2 = Encoding & 7;
3789 const uint16_t Cm = (Encoding & 0x78) >> 3;
3790 const uint16_t Cn = (Encoding & 0x780) >> 7;
3791 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3792
3793 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3794
3795 Operands.push_back(
3796 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3797 Operands.push_back(
3798 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3799 Operands.push_back(
3800 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3801 Expr = MCConstantExpr::create(Op2, getContext());
3802 Operands.push_back(
3803 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3804 }
3805
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // All of these aliases lower to a plain SYS instruction.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Dispatch on the alias mnemonic; each branch looks the operand name up in
  // the corresponding table, checks feature availability, and emits the
  // equivalent SYS operand list.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
    // Prediction-restriction instructions only accept the "rctx" operand.
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    // op2 selects the particular prediction-restriction operation.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
                         : Mnemonic == "dvp" ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp" ? 0b111
                         : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  // Operations whose name contains "all" act on everything and therefore
  // take no register; all others require one.
  bool ExpectRegister = !Op.contains_insensitive("all");
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3909
/// parseSyspAlias - The TLBIP instructions are simple aliases for
/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sysp", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "tlbip") {
    // An "nXS" suffix selects the XS-qualified variant of the TLBI operation.
    bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
    if (HasnXSQualifier) {
      Op = Op.drop_back(3);
    }
    const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBIorig)
      return TokError("invalid operand for TLBIP instruction");
    // Rebuild the table entry with the nXS bit (bit 7) folded into the
    // encoding and FeatureXS added to the requirements when qualified.
    const AArch64TLBI::TLBI TLBI(
        TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
        TLBIorig->NeedsReg,
        HasnXSQualifier
            ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
            : TLBIorig->FeaturesRequired);
    if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
      // NOTE: this local deliberately rebuilds the user-visible spelling and
      // shadows the function parameter `Name`.
      std::string Name =
          std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
      std::string Str("TLBIP " + Name + " requires: ");
      setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI.Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  if (parseComma())
    return true;

  // TLBIP always takes a register pair: either the explicit XZR pair or a
  // general-purpose sequential pair.
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("expected register identifier");
  auto Result = tryParseSyspXzrPair(Operands);
  if (Result.isNoMatch())
    Result = tryParseGPRSeqPair(Operands);
  if (!Result.isSuccess())
    return TokError("specified " + Mnemonic +
                    " op requires a pair of registers");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3968
/// tryParseBarrierOperand - Parse the operand of a barrier instruction (DSB,
/// DMB, ISB, TSB): either a 0-15 immediate or a named barrier option.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
    return TokError("'csync' operand expected");
  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back if the nXS variant
    // needs to re-parse it.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return Error(ExprLoc, "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(ExprLoc, "barrier operand out of range");
    // Print the symbolic name for the encoding when one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  // Named option: look it up in both the DB (data barrier) and TSB tables.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError("'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError("'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError("invalid barrier option name");
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4030
/// tryParseBarriernXSOperand - Parse the operand of the v8.7-A "DSB ... nXS"
/// barrier variant: a restricted immediate (16/20/24/28) or a named option.
ParseStatus
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  // Only DSB has an nXS form; any other mnemonic reaching here is a bug.
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return ParseStatus::Failure;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return Error(ExprLoc, "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
      return Error(ExprLoc, "barrier operand out of range");
    // NOTE(review): DB is dereferenced without a null check; presumably the
    // DBnXS table covers all four accepted values — confirm against the table.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB)
    return TokError("invalid barrier option name");

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4076
/// tryParseSysReg - Parse a system-register operand (for MRS/MSR and the
/// PSTATE-field form of MSR).
ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return ParseStatus::NoMatch;

  // SVCR names are handled by tryParseSVCR, not here.
  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
    return ParseStatus::NoMatch;

  // Resolve the name to MRS/MSR encodings; -1 marks a direction in which the
  // register is not accessible. Unknown names fall back to the generic
  // "s<op0>_<op1>_..." spelling.
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  // The name may also be a PSTATE field (imm0-15 or imm0-1 flavour).
  unsigned PStateImm = -1;
  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
  if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState15->Encoding;
  if (!PState15) {
    auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
    if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
      PStateImm = PState1->Encoding;
  }

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Lex(); // Eat identifier

  return ParseStatus::Success;
}
4111
4112 /// tryParseNeonVectorRegister - Parse a vector register operand.
tryParseNeonVectorRegister(OperandVector & Operands)4113 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4114 if (getTok().isNot(AsmToken::Identifier))
4115 return true;
4116
4117 SMLoc S = getLoc();
4118 // Check for a vector register specifier first.
4119 StringRef Kind;
4120 MCRegister Reg;
4121 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4122 if (!Res.isSuccess())
4123 return true;
4124
4125 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4126 if (!KindRes)
4127 return true;
4128
4129 unsigned ElementWidth = KindRes->second;
4130 Operands.push_back(
4131 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4132 S, getLoc(), getContext()));
4133
4134 // If there was an explicit qualifier, that goes on as a literal text
4135 // operand.
4136 if (!Kind.empty())
4137 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4138
4139 return tryParseVectorIndex(Operands).isFailure();
4140 }
4141
tryParseVectorIndex(OperandVector & Operands)4142 ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4143 SMLoc SIdx = getLoc();
4144 if (parseOptionalToken(AsmToken::LBrac)) {
4145 const MCExpr *ImmVal;
4146 if (getParser().parseExpression(ImmVal))
4147 return ParseStatus::NoMatch;
4148 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4149 if (!MCE)
4150 return TokError("immediate value expected for vector index");
4151
4152 SMLoc E = getLoc();
4153
4154 if (parseToken(AsmToken::RBrac, "']' expected"))
4155 return ParseStatus::Failure;
4156
4157 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4158 E, getContext()));
4159 return ParseStatus::Success;
4160 }
4161
4162 return ParseStatus::NoMatch;
4163 }
4164
4165 // tryParseVectorRegister - Try to parse a vector register name with
4166 // optional kind specifier. If it is a register specifier, eat the token
4167 // and return it.
tryParseVectorRegister(MCRegister & Reg,StringRef & Kind,RegKind MatchKind)4168 ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4169 StringRef &Kind,
4170 RegKind MatchKind) {
4171 const AsmToken &Tok = getTok();
4172
4173 if (Tok.isNot(AsmToken::Identifier))
4174 return ParseStatus::NoMatch;
4175
4176 StringRef Name = Tok.getString();
4177 // If there is a kind specifier, it's separated from the register name by
4178 // a '.'.
4179 size_t Start = 0, Next = Name.find('.');
4180 StringRef Head = Name.slice(Start, Next);
4181 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4182
4183 if (RegNum) {
4184 if (Next != StringRef::npos) {
4185 Kind = Name.slice(Next, StringRef::npos);
4186 if (!isValidVectorKind(Kind, MatchKind))
4187 return TokError("invalid vector kind qualifier");
4188 }
4189 Lex(); // Eat the register token.
4190
4191 Reg = RegNum;
4192 return ParseStatus::Success;
4193 }
4194
4195 return ParseStatus::NoMatch;
4196 }
4197
tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector & Operands)4198 ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4199 OperandVector &Operands) {
4200 ParseStatus Status =
4201 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4202 if (!Status.isSuccess())
4203 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4204 return Status;
4205 }
4206
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (!Res.isSuccess())
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  // An opening '[' starts either a vector index (predicate-as-counter) or
  // the next operand of an indexed predicate.
  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(S, "not expecting size suffix");

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(getLoc(), "expecting 'z' predication");

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(getLoc(), "expecting 'm' or 'z' predication");

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4269
4270 /// parseRegister - Parse a register operand.
parseRegister(OperandVector & Operands)4271 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4272 // Try for a Neon vector register.
4273 if (!tryParseNeonVectorRegister(Operands))
4274 return false;
4275
4276 if (tryParseZTOperand(Operands).isSuccess())
4277 return false;
4278
4279 // Otherwise try for a scalar register.
4280 if (tryParseGPROperand<false>(Operands).isSuccess())
4281 return false;
4282
4283 return true;
4284 }
4285
parseSymbolicImmVal(const MCExpr * & ImmVal)4286 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4287 bool HasELFModifier = false;
4288 AArch64MCExpr::VariantKind RefKind;
4289
4290 if (parseOptionalToken(AsmToken::Colon)) {
4291 HasELFModifier = true;
4292
4293 if (getTok().isNot(AsmToken::Identifier))
4294 return TokError("expect relocation specifier in operand after ':'");
4295
4296 std::string LowerCase = getTok().getIdentifier().lower();
4297 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
4298 .Case("lo12", AArch64MCExpr::VK_LO12)
4299 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
4300 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
4301 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
4302 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
4303 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
4304 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
4305 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
4306 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
4307 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
4308 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
4309 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
4310 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
4311 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
4312 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
4313 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
4314 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
4315 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
4316 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
4317 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
4318 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
4319 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
4320 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
4321 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
4322 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
4323 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
4324 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
4325 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
4326 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
4327 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
4328 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
4329 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
4330 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
4331 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
4332 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
4333 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
4334 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
4335 .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
4336 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
4337 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
4338 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
4339 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
4340 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
4341 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
4342 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
4343 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
4344 .Default(AArch64MCExpr::VK_INVALID);
4345
4346 if (RefKind == AArch64MCExpr::VK_INVALID)
4347 return TokError("expect relocation specifier in operand after ':'");
4348
4349 Lex(); // Eat identifier
4350
4351 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4352 return true;
4353 }
4354
4355 if (getParser().parseExpression(ImmVal))
4356 return true;
4357
4358 if (HasELFModifier)
4359 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
4360
4361 return false;
4362 }
4363
tryParseMatrixTileList(OperandVector & Operands)4364 ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4365 if (getTok().isNot(AsmToken::LCurly))
4366 return ParseStatus::NoMatch;
4367
4368 auto ParseMatrixTile = [this](unsigned &Reg,
4369 unsigned &ElementWidth) -> ParseStatus {
4370 StringRef Name = getTok().getString();
4371 size_t DotPosition = Name.find('.');
4372 if (DotPosition == StringRef::npos)
4373 return ParseStatus::NoMatch;
4374
4375 unsigned RegNum = matchMatrixTileListRegName(Name);
4376 if (!RegNum)
4377 return ParseStatus::NoMatch;
4378
4379 StringRef Tail = Name.drop_front(DotPosition);
4380 const std::optional<std::pair<int, int>> &KindRes =
4381 parseVectorKind(Tail, RegKind::Matrix);
4382 if (!KindRes)
4383 return TokError(
4384 "Expected the register to be followed by element width suffix");
4385 ElementWidth = KindRes->second;
4386 Reg = RegNum;
4387 Lex(); // Eat the register.
4388 return ParseStatus::Success;
4389 };
4390
4391 SMLoc S = getLoc();
4392 auto LCurly = getTok();
4393 Lex(); // Eat left bracket token.
4394
4395 // Empty matrix list
4396 if (parseOptionalToken(AsmToken::RCurly)) {
4397 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4398 /*RegMask=*/0, S, getLoc(), getContext()));
4399 return ParseStatus::Success;
4400 }
4401
4402 // Try parse {za} alias early
4403 if (getTok().getString().equals_insensitive("za")) {
4404 Lex(); // Eat 'za'
4405
4406 if (parseToken(AsmToken::RCurly, "'}' expected"))
4407 return ParseStatus::Failure;
4408
4409 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4410 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4411 return ParseStatus::Success;
4412 }
4413
4414 SMLoc TileLoc = getLoc();
4415
4416 unsigned FirstReg, ElementWidth;
4417 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4418 if (!ParseRes.isSuccess()) {
4419 getLexer().UnLex(LCurly);
4420 return ParseRes;
4421 }
4422
4423 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4424
4425 unsigned PrevReg = FirstReg;
4426
4427 SmallSet<unsigned, 8> DRegs;
4428 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4429
4430 SmallSet<unsigned, 8> SeenRegs;
4431 SeenRegs.insert(FirstReg);
4432
4433 while (parseOptionalToken(AsmToken::Comma)) {
4434 TileLoc = getLoc();
4435 unsigned Reg, NextElementWidth;
4436 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4437 if (!ParseRes.isSuccess())
4438 return ParseRes;
4439
4440 // Element size must match on all regs in the list.
4441 if (ElementWidth != NextElementWidth)
4442 return Error(TileLoc, "mismatched register size suffix");
4443
4444 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4445 Warning(TileLoc, "tile list not in ascending order");
4446
4447 if (SeenRegs.contains(Reg))
4448 Warning(TileLoc, "duplicate tile in list");
4449 else {
4450 SeenRegs.insert(Reg);
4451 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4452 }
4453
4454 PrevReg = Reg;
4455 }
4456
4457 if (parseToken(AsmToken::RCurly, "'}' expected"))
4458 return ParseStatus::Failure;
4459
4460 unsigned RegMask = 0;
4461 for (auto Reg : DRegs)
4462 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4463 RI->getEncodingValue(AArch64::ZAD0));
4464 Operands.push_back(
4465 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4466
4467 return ParseStatus::Success;
4468 }
4469
4470 template <RegKind VectorKind>
tryParseVectorList(OperandVector & Operands,bool ExpectMatch)4471 ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4472 bool ExpectMatch) {
4473 MCAsmParser &Parser = getParser();
4474 if (!getTok().is(AsmToken::LCurly))
4475 return ParseStatus::NoMatch;
4476
4477 // Wrapper around parse function
4478 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4479 bool NoMatchIsError) -> ParseStatus {
4480 auto RegTok = getTok();
4481 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4482 if (ParseRes.isSuccess()) {
4483 if (parseVectorKind(Kind, VectorKind))
4484 return ParseRes;
4485 llvm_unreachable("Expected a valid vector kind");
4486 }
4487
4488 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4489 RegTok.getString().equals_insensitive("zt0"))
4490 return ParseStatus::NoMatch;
4491
4492 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4493 (ParseRes.isNoMatch() && NoMatchIsError &&
4494 !RegTok.getString().starts_with_insensitive("za")))
4495 return Error(Loc, "vector register expected");
4496
4497 return ParseStatus::NoMatch;
4498 };
4499
4500 int NumRegs = getNumRegsForRegKind(VectorKind);
4501 SMLoc S = getLoc();
4502 auto LCurly = getTok();
4503 Lex(); // Eat left bracket token.
4504
4505 StringRef Kind;
4506 MCRegister FirstReg;
4507 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4508
4509 // Put back the original left bracket if there was no match, so that
4510 // different types of list-operands can be matched (e.g. SVE, Neon).
4511 if (ParseRes.isNoMatch())
4512 Parser.getLexer().UnLex(LCurly);
4513
4514 if (!ParseRes.isSuccess())
4515 return ParseRes;
4516
4517 int64_t PrevReg = FirstReg;
4518 unsigned Count = 1;
4519
4520 int Stride = 1;
4521 if (parseOptionalToken(AsmToken::Minus)) {
4522 SMLoc Loc = getLoc();
4523 StringRef NextKind;
4524
4525 MCRegister Reg;
4526 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4527 if (!ParseRes.isSuccess())
4528 return ParseRes;
4529
4530 // Any Kind suffices must match on all regs in the list.
4531 if (Kind != NextKind)
4532 return Error(Loc, "mismatched register size suffix");
4533
4534 unsigned Space =
4535 (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);
4536
4537 if (Space == 0 || Space > 3)
4538 return Error(Loc, "invalid number of vectors");
4539
4540 Count += Space;
4541 }
4542 else {
4543 bool HasCalculatedStride = false;
4544 while (parseOptionalToken(AsmToken::Comma)) {
4545 SMLoc Loc = getLoc();
4546 StringRef NextKind;
4547 MCRegister Reg;
4548 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4549 if (!ParseRes.isSuccess())
4550 return ParseRes;
4551
4552 // Any Kind suffices must match on all regs in the list.
4553 if (Kind != NextKind)
4554 return Error(Loc, "mismatched register size suffix");
4555
4556 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4557 unsigned PrevRegVal =
4558 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4559 if (!HasCalculatedStride) {
4560 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4561 : (RegVal + NumRegs - PrevRegVal);
4562 HasCalculatedStride = true;
4563 }
4564
4565 // Register must be incremental (with a wraparound at last register).
4566 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4567 return Error(Loc, "registers must have the same sequential stride");
4568
4569 PrevReg = Reg;
4570 ++Count;
4571 }
4572 }
4573
4574 if (parseToken(AsmToken::RCurly, "'}' expected"))
4575 return ParseStatus::Failure;
4576
4577 if (Count > 4)
4578 return Error(S, "invalid number of vectors");
4579
4580 unsigned NumElements = 0;
4581 unsigned ElementWidth = 0;
4582 if (!Kind.empty()) {
4583 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4584 std::tie(NumElements, ElementWidth) = *VK;
4585 }
4586
4587 Operands.push_back(AArch64Operand::CreateVectorList(
4588 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4589 getLoc(), getContext()));
4590
4591 return ParseStatus::Success;
4592 }
4593
4594 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
parseNeonVectorList(OperandVector & Operands)4595 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4596 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4597 if (!ParseRes.isSuccess())
4598 return true;
4599
4600 return tryParseVectorIndex(Operands).isFailure();
4601 }
4602
tryParseGPR64sp0Operand(OperandVector & Operands)4603 ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4604 SMLoc StartLoc = getLoc();
4605
4606 MCRegister RegNum;
4607 ParseStatus Res = tryParseScalarRegister(RegNum);
4608 if (!Res.isSuccess())
4609 return Res;
4610
4611 if (!parseOptionalToken(AsmToken::Comma)) {
4612 Operands.push_back(AArch64Operand::CreateReg(
4613 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4614 return ParseStatus::Success;
4615 }
4616
4617 parseOptionalToken(AsmToken::Hash);
4618
4619 if (getTok().isNot(AsmToken::Integer))
4620 return Error(getLoc(), "index must be absent or #0");
4621
4622 const MCExpr *ImmVal;
4623 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4624 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4625 return Error(getLoc(), "index must be absent or #0");
4626
4627 Operands.push_back(AArch64Operand::CreateReg(
4628 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4629 return ParseStatus::Success;
4630 }
4631
tryParseZTOperand(OperandVector & Operands)4632 ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4633 SMLoc StartLoc = getLoc();
4634 const AsmToken &Tok = getTok();
4635 std::string Name = Tok.getString().lower();
4636
4637 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4638
4639 if (RegNum == 0)
4640 return ParseStatus::NoMatch;
4641
4642 Operands.push_back(AArch64Operand::CreateReg(
4643 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4644 Lex(); // Eat register.
4645
4646 // Check if register is followed by an index
4647 if (parseOptionalToken(AsmToken::LBrac)) {
4648 Operands.push_back(
4649 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4650 const MCExpr *ImmVal;
4651 if (getParser().parseExpression(ImmVal))
4652 return ParseStatus::NoMatch;
4653 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4654 if (!MCE)
4655 return TokError("immediate value expected for vector index");
4656 Operands.push_back(AArch64Operand::CreateImm(
4657 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4658 getLoc(), getContext()));
4659 if (parseOptionalToken(AsmToken::Comma))
4660 if (parseOptionalMulOperand(Operands))
4661 return ParseStatus::Failure;
4662 if (parseToken(AsmToken::RBrac, "']' expected"))
4663 return ParseStatus::Failure;
4664 Operands.push_back(
4665 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4666 }
4667 return ParseStatus::Success;
4668 }
4669
4670 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
tryParseGPROperand(OperandVector & Operands)4671 ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4672 SMLoc StartLoc = getLoc();
4673
4674 MCRegister RegNum;
4675 ParseStatus Res = tryParseScalarRegister(RegNum);
4676 if (!Res.isSuccess())
4677 return Res;
4678
4679 // No shift/extend is the default.
4680 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4681 Operands.push_back(AArch64Operand::CreateReg(
4682 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4683 return ParseStatus::Success;
4684 }
4685
4686 // Eat the comma
4687 Lex();
4688
4689 // Match the shift
4690 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4691 Res = tryParseOptionalShiftExtend(ExtOpnd);
4692 if (!Res.isSuccess())
4693 return Res;
4694
4695 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4696 Operands.push_back(AArch64Operand::CreateReg(
4697 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4698 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4699 Ext->hasShiftExtendAmount()));
4700
4701 return ParseStatus::Success;
4702 }
4703
parseOptionalMulOperand(OperandVector & Operands)4704 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4705 MCAsmParser &Parser = getParser();
4706
4707 // Some SVE instructions have a decoration after the immediate, i.e.
4708 // "mul vl". We parse them here and add tokens, which must be present in the
4709 // asm string in the tablegen instruction.
4710 bool NextIsVL =
4711 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4712 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4713 if (!getTok().getString().equals_insensitive("mul") ||
4714 !(NextIsVL || NextIsHash))
4715 return true;
4716
4717 Operands.push_back(
4718 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4719 Lex(); // Eat the "mul"
4720
4721 if (NextIsVL) {
4722 Operands.push_back(
4723 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4724 Lex(); // Eat the "vl"
4725 return false;
4726 }
4727
4728 if (NextIsHash) {
4729 Lex(); // Eat the #
4730 SMLoc S = getLoc();
4731
4732 // Parse immediate operand.
4733 const MCExpr *ImmVal;
4734 if (!Parser.parseExpression(ImmVal))
4735 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4736 Operands.push_back(AArch64Operand::CreateImm(
4737 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4738 getContext()));
4739 return false;
4740 }
4741 }
4742
4743 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4744 }
4745
parseOptionalVGOperand(OperandVector & Operands,StringRef & VecGroup)4746 bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4747 StringRef &VecGroup) {
4748 MCAsmParser &Parser = getParser();
4749 auto Tok = Parser.getTok();
4750 if (Tok.isNot(AsmToken::Identifier))
4751 return true;
4752
4753 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4754 .Case("vgx2", "vgx2")
4755 .Case("vgx4", "vgx4")
4756 .Default("");
4757
4758 if (VG.empty())
4759 return true;
4760
4761 VecGroup = VG;
4762 Parser.Lex(); // Eat vgx[2|4]
4763 return false;
4764 }
4765
parseKeywordOperand(OperandVector & Operands)4766 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4767 auto Tok = getTok();
4768 if (Tok.isNot(AsmToken::Identifier))
4769 return true;
4770
4771 auto Keyword = Tok.getString();
4772 Keyword = StringSwitch<StringRef>(Keyword.lower())
4773 .Case("sm", "sm")
4774 .Case("za", "za")
4775 .Default(Keyword);
4776 Operands.push_back(
4777 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4778
4779 Lex();
4780 return false;
4781 }
4782
4783 /// parseOperand - Parse a arm instruction operand. For now this parses the
4784 /// operand regardless of the mnemonic.
parseOperand(OperandVector & Operands,bool isCondCode,bool invertCondCode)4785 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4786 bool invertCondCode) {
4787 MCAsmParser &Parser = getParser();
4788
4789 ParseStatus ResTy =
4790 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4791
4792 // Check if the current operand has a custom associated parser, if so, try to
4793 // custom parse the operand, or fallback to the general approach.
4794 if (ResTy.isSuccess())
4795 return false;
4796 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4797 // there was a match, but an error occurred, in which case, just return that
4798 // the operand parsing failed.
4799 if (ResTy.isFailure())
4800 return true;
4801
4802 // Nothing custom, so do general case parsing.
4803 SMLoc S, E;
4804 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4805 if (parseOptionalToken(AsmToken::Comma)) {
4806 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4807 if (!Res.isNoMatch())
4808 return Res.isFailure();
4809 getLexer().UnLex(SavedTok);
4810 }
4811 return false;
4812 };
4813 switch (getLexer().getKind()) {
4814 default: {
4815 SMLoc S = getLoc();
4816 const MCExpr *Expr;
4817 if (parseSymbolicImmVal(Expr))
4818 return Error(S, "invalid operand");
4819
4820 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4821 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4822 return parseOptionalShiftExtend(getTok());
4823 }
4824 case AsmToken::LBrac: {
4825 Operands.push_back(
4826 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4827 Lex(); // Eat '['
4828
4829 // There's no comma after a '[', so we can parse the next operand
4830 // immediately.
4831 return parseOperand(Operands, false, false);
4832 }
4833 case AsmToken::LCurly: {
4834 if (!parseNeonVectorList(Operands))
4835 return false;
4836
4837 Operands.push_back(
4838 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4839 Lex(); // Eat '{'
4840
4841 // There's no comma after a '{', so we can parse the next operand
4842 // immediately.
4843 return parseOperand(Operands, false, false);
4844 }
4845 case AsmToken::Identifier: {
4846 // See if this is a "VG" decoration used by SME instructions.
4847 StringRef VecGroup;
4848 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4849 Operands.push_back(
4850 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4851 return false;
4852 }
4853 // If we're expecting a Condition Code operand, then just parse that.
4854 if (isCondCode)
4855 return parseCondCode(Operands, invertCondCode);
4856
4857 // If it's a register name, parse it.
4858 if (!parseRegister(Operands)) {
4859 // Parse an optional shift/extend modifier.
4860 AsmToken SavedTok = getTok();
4861 if (parseOptionalToken(AsmToken::Comma)) {
4862 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4863 // such cases and don't report an error when <label> happens to match a
4864 // shift/extend modifier.
4865 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4866 /*ParseForAllFeatures=*/true);
4867 if (!Res.isNoMatch())
4868 return Res.isFailure();
4869 Res = tryParseOptionalShiftExtend(Operands);
4870 if (!Res.isNoMatch())
4871 return Res.isFailure();
4872 getLexer().UnLex(SavedTok);
4873 }
4874 return false;
4875 }
4876
4877 // See if this is a "mul vl" decoration or "mul #<int>" operand used
4878 // by SVE instructions.
4879 if (!parseOptionalMulOperand(Operands))
4880 return false;
4881
4882 // If this is a two-word mnemonic, parse its special keyword
4883 // operand as an identifier.
4884 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4885 Mnemonic == "gcsb")
4886 return parseKeywordOperand(Operands);
4887
4888 // This was not a register so parse other operands that start with an
4889 // identifier (like labels) as expressions and create them as immediates.
4890 const MCExpr *IdVal;
4891 S = getLoc();
4892 if (getParser().parseExpression(IdVal))
4893 return true;
4894 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4895 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4896 return false;
4897 }
4898 case AsmToken::Integer:
4899 case AsmToken::Real:
4900 case AsmToken::Hash: {
4901 // #42 -> immediate.
4902 S = getLoc();
4903
4904 parseOptionalToken(AsmToken::Hash);
4905
4906 // Parse a negative sign
4907 bool isNegative = false;
4908 if (getTok().is(AsmToken::Minus)) {
4909 isNegative = true;
4910 // We need to consume this token only when we have a Real, otherwise
4911 // we let parseSymbolicImmVal take care of it
4912 if (Parser.getLexer().peekTok().is(AsmToken::Real))
4913 Lex();
4914 }
4915
4916 // The only Real that should come through here is a literal #0.0 for
4917 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4918 // so convert the value.
4919 const AsmToken &Tok = getTok();
4920 if (Tok.is(AsmToken::Real)) {
4921 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4922 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4923 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4924 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4925 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4926 return TokError("unexpected floating point literal");
4927 else if (IntVal != 0 || isNegative)
4928 return TokError("expected floating-point constant #0.0");
4929 Lex(); // Eat the token.
4930
4931 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4932 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4933 return false;
4934 }
4935
4936 const MCExpr *ImmVal;
4937 if (parseSymbolicImmVal(ImmVal))
4938 return true;
4939
4940 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4941 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4942
4943 // Parse an optional shift/extend modifier.
4944 return parseOptionalShiftExtend(Tok);
4945 }
4946 case AsmToken::Equal: {
4947 SMLoc Loc = getLoc();
4948 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4949 return TokError("unexpected token in operand");
4950 Lex(); // Eat '='
4951 const MCExpr *SubExprVal;
4952 if (getParser().parseExpression(SubExprVal))
4953 return true;
4954
4955 if (Operands.size() < 2 ||
4956 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4957 return Error(Loc, "Only valid when first operand is register");
4958
4959 bool IsXReg =
4960 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4961 Operands[1]->getReg());
4962
4963 MCContext& Ctx = getContext();
4964 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4965 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
4966 if (isa<MCConstantExpr>(SubExprVal)) {
4967 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4968 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4969 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
4970 ShiftAmt += 16;
4971 Imm >>= 16;
4972 }
4973 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4974 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4975 Operands.push_back(AArch64Operand::CreateImm(
4976 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4977 if (ShiftAmt)
4978 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4979 ShiftAmt, true, S, E, Ctx));
4980 return false;
4981 }
4982 APInt Simm = APInt(64, Imm << ShiftAmt);
4983 // check if the immediate is an unsigned or signed 32-bit int for W regs
4984 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4985 return Error(Loc, "Immediate too large for register");
4986 }
4987 // If it is a label or an imm that cannot fit in a movz, put it into CP.
4988 const MCExpr *CPLoc =
4989 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4990 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4991 return false;
4992 }
4993 }
4994 }
4995
parseImmExpr(int64_t & Out)4996 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4997 const MCExpr *Expr = nullptr;
4998 SMLoc L = getLoc();
4999 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5000 return true;
5001 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5002 if (check(!Value, L, "expected constant expression"))
5003 return true;
5004 Out = Value->getValue();
5005 return false;
5006 }
5007
parseComma()5008 bool AArch64AsmParser::parseComma() {
5009 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5010 return true;
5011 // Eat the comma
5012 Lex();
5013 return false;
5014 }
5015
parseRegisterInRange(unsigned & Out,unsigned Base,unsigned First,unsigned Last)5016 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5017 unsigned First, unsigned Last) {
5018 MCRegister Reg;
5019 SMLoc Start, End;
5020 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5021 return true;
5022
5023 // Special handling for FP and LR; they aren't linearly after x28 in
5024 // the registers enum.
5025 unsigned RangeEnd = Last;
5026 if (Base == AArch64::X0) {
5027 if (Last == AArch64::FP) {
5028 RangeEnd = AArch64::X28;
5029 if (Reg == AArch64::FP) {
5030 Out = 29;
5031 return false;
5032 }
5033 }
5034 if (Last == AArch64::LR) {
5035 RangeEnd = AArch64::X28;
5036 if (Reg == AArch64::FP) {
5037 Out = 29;
5038 return false;
5039 } else if (Reg == AArch64::LR) {
5040 Out = 30;
5041 return false;
5042 }
5043 }
5044 }
5045
5046 if (check(Reg < First || Reg > RangeEnd, Start,
5047 Twine("expected register in range ") +
5048 AArch64InstPrinter::getRegisterName(First) + " to " +
5049 AArch64InstPrinter::getRegisterName(Last)))
5050 return true;
5051 Out = Reg - Base;
5052 return false;
5053 }
5054
areEqualRegs(const MCParsedAsmOperand & Op1,const MCParsedAsmOperand & Op2) const5055 bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5056 const MCParsedAsmOperand &Op2) const {
5057 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5058 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5059
5060 if (AOp1.isVectorList() && AOp2.isVectorList())
5061 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5062 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5063 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5064
5065 if (!AOp1.isReg() || !AOp2.isReg())
5066 return false;
5067
5068 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5069 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5070 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5071
5072 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5073 "Testing equality of non-scalar registers not supported");
5074
5075 // Check if a registers match their sub/super register classes.
5076 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5077 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5078 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5079 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5080 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5081 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5082 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5083 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5084
5085 return false;
5086 }
5087
5088 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
5089 /// operands.
ParseInstruction(ParseInstructionInfo & Info,StringRef Name,SMLoc NameLoc,OperandVector & Operands)5090 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
5091 StringRef Name, SMLoc NameLoc,
5092 OperandVector &Operands) {
5093 Name = StringSwitch<StringRef>(Name.lower())
5094 .Case("beq", "b.eq")
5095 .Case("bne", "b.ne")
5096 .Case("bhs", "b.hs")
5097 .Case("bcs", "b.cs")
5098 .Case("blo", "b.lo")
5099 .Case("bcc", "b.cc")
5100 .Case("bmi", "b.mi")
5101 .Case("bpl", "b.pl")
5102 .Case("bvs", "b.vs")
5103 .Case("bvc", "b.vc")
5104 .Case("bhi", "b.hi")
5105 .Case("bls", "b.ls")
5106 .Case("bge", "b.ge")
5107 .Case("blt", "b.lt")
5108 .Case("bgt", "b.gt")
5109 .Case("ble", "b.le")
5110 .Case("bal", "b.al")
5111 .Case("bnv", "b.nv")
5112 .Default(Name);
5113
5114 // First check for the AArch64-specific .req directive.
5115 if (getTok().is(AsmToken::Identifier) &&
5116 getTok().getIdentifier().lower() == ".req") {
5117 parseDirectiveReq(Name, NameLoc);
5118 // We always return 'error' for this, as we're done with this
5119 // statement and don't need to match the 'instruction."
5120 return true;
5121 }
5122
5123 // Create the leading tokens for the mnemonic, split by '.' characters.
5124 size_t Start = 0, Next = Name.find('.');
5125 StringRef Head = Name.slice(Start, Next);
5126
5127 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5128 // the SYS instruction.
5129 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5130 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5131 return parseSysAlias(Head, NameLoc, Operands);
5132
5133 // TLBIP instructions are aliases for the SYSP instruction.
5134 if (Head == "tlbip")
5135 return parseSyspAlias(Head, NameLoc, Operands);
5136
5137 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5138 Mnemonic = Head;
5139
5140 // Handle condition codes for a branch mnemonic
5141 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5142 Start = Next;
5143 Next = Name.find('.', Start + 1);
5144 Head = Name.slice(Start + 1, Next);
5145
5146 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5147 (Head.data() - Name.data()));
5148 std::string Suggestion;
5149 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5150 if (CC == AArch64CC::Invalid) {
5151 std::string Msg = "invalid condition code";
5152 if (!Suggestion.empty())
5153 Msg += ", did you mean " + Suggestion + "?";
5154 return Error(SuffixLoc, Msg);
5155 }
5156 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5157 /*IsSuffix=*/true));
5158 Operands.push_back(
5159 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5160 }
5161
5162 // Add the remaining tokens in the mnemonic.
5163 while (Next != StringRef::npos) {
5164 Start = Next;
5165 Next = Name.find('.', Start + 1);
5166 Head = Name.slice(Start, Next);
5167 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5168 (Head.data() - Name.data()) + 1);
5169 Operands.push_back(AArch64Operand::CreateToken(
5170 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5171 }
5172
5173 // Conditional compare instructions have a Condition Code operand, which needs
5174 // to be parsed and an immediate operand created.
5175 bool condCodeFourthOperand =
5176 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5177 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5178 Head == "csinc" || Head == "csinv" || Head == "csneg");
5179
5180 // These instructions are aliases to some of the conditional select
5181 // instructions. However, the condition code is inverted in the aliased
5182 // instruction.
5183 //
5184 // FIXME: Is this the correct way to handle these? Or should the parser
5185 // generate the aliased instructions directly?
5186 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5187 bool condCodeThirdOperand =
5188 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5189
5190 // Read the remaining operands.
5191 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5192
5193 unsigned N = 1;
5194 do {
5195 // Parse and remember the operand.
5196 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5197 (N == 3 && condCodeThirdOperand) ||
5198 (N == 2 && condCodeSecondOperand),
5199 condCodeSecondOperand || condCodeThirdOperand)) {
5200 return true;
5201 }
5202
5203 // After successfully parsing some operands there are three special cases
5204 // to consider (i.e. notional operands not separated by commas). Two are
5205 // due to memory specifiers:
5206 // + An RBrac will end an address for load/store/prefetch
5207 // + An '!' will indicate a pre-indexed operation.
5208 //
5209 // And a further case is '}', which ends a group of tokens specifying the
5210 // SME accumulator array 'ZA' or tile vector, i.e.
5211 //
5212 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5213 //
5214 // It's someone else's responsibility to make sure these tokens are sane
5215 // in the given context!
5216
5217 if (parseOptionalToken(AsmToken::RBrac))
5218 Operands.push_back(
5219 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5220 if (parseOptionalToken(AsmToken::Exclaim))
5221 Operands.push_back(
5222 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5223 if (parseOptionalToken(AsmToken::RCurly))
5224 Operands.push_back(
5225 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5226
5227 ++N;
5228 } while (parseOptionalToken(AsmToken::Comma));
5229 }
5230
5231 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5232 return true;
5233
5234 return false;
5235 }
5236
isMatchingOrAlias(unsigned ZReg,unsigned Reg)5237 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
5238 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5239 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5240 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5241 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5242 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5243 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5244 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5245 }
5246
5247 // FIXME: This entire function is a giant hack to provide us with decent
5248 // operand range validation/diagnostics until TableGen/MC can be extended
5249 // to support autogeneration of this kind of validation.
/// Perform target-specific semantic validation of an already-matched MCInst,
/// emitting a diagnostic and returning true when the instruction would be
/// invalid or architecturally unpredictable; returns false when acceptable.
///
/// \param Inst  the matched instruction to validate (also consulted to set up
///              movprfx state for the *next* instruction).
/// \param IDLoc location of the mnemonic, used for whole-instruction errors.
/// \param Loc   per-operand source locations, used to point diagnostics at
///              the specific offending operand.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erroneously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed instructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                   " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                   " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    // Tied operands are exempt (they are the destructive read of the
    // destination itself); any other alias of the prefixed destination,
    // including a differently-sized view of it, is rejected.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                     " movprfx and destination also used as non-destructive"
                     " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instruction's general predicate: the first PPR register
      // operand after the destination.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx with a different element size");
    }
  }

  // On ARM64EC, only valid registers may be used. Warn against using
  // explicitly disallowed registers.
  if (IsWindowsArm64EC) {
    for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg()) {
        unsigned Reg = Inst.getOperand(i).getReg();
        // At this point, vector registers are matched to their
        // appropriately sized alias.
        if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
            (Reg == AArch64::W14 || Reg == AArch64::X14) ||
            (Reg == AArch64::W23 || Reg == AArch64::X23) ||
            (Reg == AArch64::W24 || Reg == AArch64::X24) ||
            (Reg == AArch64::W28 || Reg == AArch64::X28) ||
            (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
            (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
            (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
            (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
            (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
          Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
                             " is disallowed on ARM64EC.");
        }
      }
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback forms: operand 0 is the writeback copy of the base, so the
    // pair destinations and the base start at operand 1.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // NOTE(review): this falls through into the LDR_ZA/STR_ZA check below.
    // For these LDP forms operand 2 is a register, so the isImm() guard there
    // never fires — confirm the fallthrough (rather than a break) is intended.
    [[fallthrough]];
  }
  case AArch64::LDR_ZA:
  case AArch64::STR_ZA: {
    // The ZA array vector select immediate and the memory offset immediate
    // must be the same value.
    if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
        Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
      return Error(Loc[1],
                   "unpredictable instruction, immediate and offset mismatch.");
    break;
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback pair loads: only Rt == Rt2 is unpredictable.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // FP/SIMD writeback pair loads: base is not a GPR so it cannot clash
    // with the destinations, but Rt == Rt2 is still unpredictable.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    // Writeback pair stores: the updated base must not be a stored source.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Writeback single-register loads: base must not also be the destination.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Writeback single-register stores: base must not also be the source.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive stores: the status register Rs must not alias the data
    // register, nor the base register (unless the base is SP).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    // Exclusive pair stores: same status-register constraint for both data
    // registers and the (non-SP) base.
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    // Pointer-authenticated loads with writeback: base and destination must
    // differ.
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
                   "unpredictable LDRA instruction, writeback base"
                   " is also a destination");
    break;
  }
  }

  // Check v8.8-A memops instructions.
  switch (Inst.getOpcode()) {
  case AArch64::CPYFP:
  case AArch64::CPYFPWN:
  case AArch64::CPYFPRN:
  case AArch64::CPYFPN:
  case AArch64::CPYFPWT:
  case AArch64::CPYFPWTWN:
  case AArch64::CPYFPWTRN:
  case AArch64::CPYFPWTN:
  case AArch64::CPYFPRT:
  case AArch64::CPYFPRTWN:
  case AArch64::CPYFPRTRN:
  case AArch64::CPYFPRTN:
  case AArch64::CPYFPT:
  case AArch64::CPYFPTWN:
  case AArch64::CPYFPTRN:
  case AArch64::CPYFPTN:
  case AArch64::CPYFM:
  case AArch64::CPYFMWN:
  case AArch64::CPYFMRN:
  case AArch64::CPYFMN:
  case AArch64::CPYFMWT:
  case AArch64::CPYFMWTWN:
  case AArch64::CPYFMWTRN:
  case AArch64::CPYFMWTN:
  case AArch64::CPYFMRT:
  case AArch64::CPYFMRTWN:
  case AArch64::CPYFMRTRN:
  case AArch64::CPYFMRTN:
  case AArch64::CPYFMT:
  case AArch64::CPYFMTWN:
  case AArch64::CPYFMTRN:
  case AArch64::CPYFMTN:
  case AArch64::CPYFE:
  case AArch64::CPYFEWN:
  case AArch64::CPYFERN:
  case AArch64::CPYFEN:
  case AArch64::CPYFEWT:
  case AArch64::CPYFEWTWN:
  case AArch64::CPYFEWTRN:
  case AArch64::CPYFEWTN:
  case AArch64::CPYFERT:
  case AArch64::CPYFERTWN:
  case AArch64::CPYFERTRN:
  case AArch64::CPYFERTN:
  case AArch64::CPYFET:
  case AArch64::CPYFETWN:
  case AArch64::CPYFETRN:
  case AArch64::CPYFETN:
  case AArch64::CPYP:
  case AArch64::CPYPWN:
  case AArch64::CPYPRN:
  case AArch64::CPYPN:
  case AArch64::CPYPWT:
  case AArch64::CPYPWTWN:
  case AArch64::CPYPWTRN:
  case AArch64::CPYPWTN:
  case AArch64::CPYPRT:
  case AArch64::CPYPRTWN:
  case AArch64::CPYPRTRN:
  case AArch64::CPYPRTN:
  case AArch64::CPYPT:
  case AArch64::CPYPTWN:
  case AArch64::CPYPTRN:
  case AArch64::CPYPTN:
  case AArch64::CPYM:
  case AArch64::CPYMWN:
  case AArch64::CPYMRN:
  case AArch64::CPYMN:
  case AArch64::CPYMWT:
  case AArch64::CPYMWTWN:
  case AArch64::CPYMWTRN:
  case AArch64::CPYMWTN:
  case AArch64::CPYMRT:
  case AArch64::CPYMRTWN:
  case AArch64::CPYMRTRN:
  case AArch64::CPYMRTN:
  case AArch64::CPYMT:
  case AArch64::CPYMTWN:
  case AArch64::CPYMTRN:
  case AArch64::CPYMTN:
  case AArch64::CPYE:
  case AArch64::CPYEWN:
  case AArch64::CPYERN:
  case AArch64::CPYEN:
  case AArch64::CPYEWT:
  case AArch64::CPYEWTWN:
  case AArch64::CPYEWTRN:
  case AArch64::CPYEWTN:
  case AArch64::CPYERT:
  case AArch64::CPYERTWN:
  case AArch64::CPYERTRN:
  case AArch64::CPYERTN:
  case AArch64::CPYET:
  case AArch64::CPYETWN:
  case AArch64::CPYETRN:
  case AArch64::CPYETN: {
    // CPY*: operands 0-2 are the writeback (tied) copies of destination,
    // source and size; operands 3-5 the incoming values. All three register
    // pairs must match, and the three registers must be mutually distinct.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xs_wb = Inst.getOperand(1).getReg();
    unsigned Xn_wb = Inst.getOperand(2).getReg();
    unsigned Xd = Inst.getOperand(3).getReg();
    unsigned Xs = Inst.getOperand(4).getReg();
    unsigned Xn = Inst.getOperand(5).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid CPY instruction, Xd_wb and Xd do not match");
    if (Xs_wb != Xs)
      return Error(Loc[0],
                   "invalid CPY instruction, Xs_wb and Xs do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid CPY instruction, Xn_wb and Xn do not match");
    if (Xd == Xs)
      return Error(Loc[0], "invalid CPY instruction, destination and source"
                           " registers are the same");
    if (Xd == Xn)
      return Error(Loc[0], "invalid CPY instruction, destination and size"
                           " registers are the same");
    if (Xs == Xn)
      return Error(Loc[0], "invalid CPY instruction, source and size"
                           " registers are the same");
    break;
  }
  case AArch64::SETP:
  case AArch64::SETPT:
  case AArch64::SETPN:
  case AArch64::SETPTN:
  case AArch64::SETM:
  case AArch64::SETMT:
  case AArch64::SETMN:
  case AArch64::SETMTN:
  case AArch64::SETE:
  case AArch64::SETET:
  case AArch64::SETEN:
  case AArch64::SETETN:
  case AArch64::SETGP:
  case AArch64::SETGPT:
  case AArch64::SETGPN:
  case AArch64::SETGPTN:
  case AArch64::SETGM:
  case AArch64::SETGMT:
  case AArch64::SETGMN:
  case AArch64::SETGMTN:
  case AArch64::MOPSSETGE:
  case AArch64::MOPSSETGET:
  case AArch64::MOPSSETGEN:
  case AArch64::MOPSSETGETN: {
    // SET*: operands 0-1 are the writeback (tied) copies of destination and
    // size; operands 2-4 are destination, size and source value. Writeback
    // pairs must match and all three registers must be mutually distinct.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xn_wb = Inst.getOperand(1).getReg();
    unsigned Xd = Inst.getOperand(2).getReg();
    unsigned Xn = Inst.getOperand(3).getReg();
    unsigned Xm = Inst.getOperand(4).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid SET instruction, Xd_wb and Xd do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid SET instruction, Xn_wb and Xn do not match");
    if (Xd == Xn)
      return Error(Loc[0], "invalid SET instruction, destination and size"
                           " registers are the same");
    if (Xd == Xm)
      return Error(Loc[0], "invalid SET instruction, destination and source"
                           " registers are the same");
    if (Xn == Xm)
      return Error(Loc[0], "invalid SET instruction, source and size"
                           " registers are the same");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
5738
5739 static std::string AArch64MnemonicSpellCheck(StringRef S,
5740 const FeatureBitset &FBS,
5741 unsigned VariantID = 0);
5742
showMatchError(SMLoc Loc,unsigned ErrCode,uint64_t ErrorInfo,OperandVector & Operands)5743 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5744 uint64_t ErrorInfo,
5745 OperandVector &Operands) {
5746 switch (ErrCode) {
5747 case Match_InvalidTiedOperand: {
5748 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5749 if (Op.isVectorList())
5750 return Error(Loc, "operand must match destination register list");
5751
5752 assert(Op.isReg() && "Unexpected operand type");
5753 switch (Op.getRegEqualityTy()) {
5754 case RegConstraintEqualityTy::EqualsSubReg:
5755 return Error(Loc, "operand must be 64-bit form of destination register");
5756 case RegConstraintEqualityTy::EqualsSuperReg:
5757 return Error(Loc, "operand must be 32-bit form of destination register");
5758 case RegConstraintEqualityTy::EqualsReg:
5759 return Error(Loc, "operand must match destination register");
5760 }
5761 llvm_unreachable("Unknown RegConstraintEqualityTy");
5762 }
5763 case Match_MissingFeature:
5764 return Error(Loc,
5765 "instruction requires a CPU feature not currently enabled");
5766 case Match_InvalidOperand:
5767 return Error(Loc, "invalid operand for instruction");
5768 case Match_InvalidSuffix:
5769 return Error(Loc, "invalid type suffix for instruction");
5770 case Match_InvalidCondCode:
5771 return Error(Loc, "expected AArch64 condition code");
5772 case Match_AddSubRegExtendSmall:
5773 return Error(Loc,
5774 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5775 case Match_AddSubRegExtendLarge:
5776 return Error(Loc,
5777 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5778 case Match_AddSubSecondSource:
5779 return Error(Loc,
5780 "expected compatible register, symbol or integer in range [0, 4095]");
5781 case Match_LogicalSecondSource:
5782 return Error(Loc, "expected compatible register or logical immediate");
5783 case Match_InvalidMovImm32Shift:
5784 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5785 case Match_InvalidMovImm64Shift:
5786 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5787 case Match_AddSubRegShift32:
5788 return Error(Loc,
5789 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5790 case Match_AddSubRegShift64:
5791 return Error(Loc,
5792 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5793 case Match_InvalidFPImm:
5794 return Error(Loc,
5795 "expected compatible register or floating-point constant");
5796 case Match_InvalidMemoryIndexedSImm6:
5797 return Error(Loc, "index must be an integer in range [-32, 31].");
5798 case Match_InvalidMemoryIndexedSImm5:
5799 return Error(Loc, "index must be an integer in range [-16, 15].");
5800 case Match_InvalidMemoryIndexed1SImm4:
5801 return Error(Loc, "index must be an integer in range [-8, 7].");
5802 case Match_InvalidMemoryIndexed2SImm4:
5803 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5804 case Match_InvalidMemoryIndexed3SImm4:
5805 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5806 case Match_InvalidMemoryIndexed4SImm4:
5807 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5808 case Match_InvalidMemoryIndexed16SImm4:
5809 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5810 case Match_InvalidMemoryIndexed32SImm4:
5811 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5812 case Match_InvalidMemoryIndexed1SImm6:
5813 return Error(Loc, "index must be an integer in range [-32, 31].");
5814 case Match_InvalidMemoryIndexedSImm8:
5815 return Error(Loc, "index must be an integer in range [-128, 127].");
5816 case Match_InvalidMemoryIndexedSImm9:
5817 return Error(Loc, "index must be an integer in range [-256, 255].");
5818 case Match_InvalidMemoryIndexed16SImm9:
5819 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5820 case Match_InvalidMemoryIndexed8SImm10:
5821 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5822 case Match_InvalidMemoryIndexed4SImm7:
5823 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5824 case Match_InvalidMemoryIndexed8SImm7:
5825 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5826 case Match_InvalidMemoryIndexed16SImm7:
5827 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5828 case Match_InvalidMemoryIndexed8UImm5:
5829 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5830 case Match_InvalidMemoryIndexed8UImm3:
5831 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5832 case Match_InvalidMemoryIndexed4UImm5:
5833 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5834 case Match_InvalidMemoryIndexed2UImm5:
5835 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5836 case Match_InvalidMemoryIndexed8UImm6:
5837 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5838 case Match_InvalidMemoryIndexed16UImm6:
5839 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5840 case Match_InvalidMemoryIndexed4UImm6:
5841 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5842 case Match_InvalidMemoryIndexed2UImm6:
5843 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5844 case Match_InvalidMemoryIndexed1UImm6:
5845 return Error(Loc, "index must be in range [0, 63].");
5846 case Match_InvalidMemoryWExtend8:
5847 return Error(Loc,
5848 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5849 case Match_InvalidMemoryWExtend16:
5850 return Error(Loc,
5851 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5852 case Match_InvalidMemoryWExtend32:
5853 return Error(Loc,
5854 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5855 case Match_InvalidMemoryWExtend64:
5856 return Error(Loc,
5857 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5858 case Match_InvalidMemoryWExtend128:
5859 return Error(Loc,
5860 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5861 case Match_InvalidMemoryXExtend8:
5862 return Error(Loc,
5863 "expected 'lsl' or 'sxtx' with optional shift of #0");
5864 case Match_InvalidMemoryXExtend16:
5865 return Error(Loc,
5866 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5867 case Match_InvalidMemoryXExtend32:
5868 return Error(Loc,
5869 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5870 case Match_InvalidMemoryXExtend64:
5871 return Error(Loc,
5872 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5873 case Match_InvalidMemoryXExtend128:
5874 return Error(Loc,
5875 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5876 case Match_InvalidMemoryIndexed1:
5877 return Error(Loc, "index must be an integer in range [0, 4095].");
5878 case Match_InvalidMemoryIndexed2:
5879 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5880 case Match_InvalidMemoryIndexed4:
5881 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5882 case Match_InvalidMemoryIndexed8:
5883 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5884 case Match_InvalidMemoryIndexed16:
5885 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5886 case Match_InvalidImm0_0:
5887 return Error(Loc, "immediate must be 0.");
5888 case Match_InvalidImm0_1:
5889 return Error(Loc, "immediate must be an integer in range [0, 1].");
5890 case Match_InvalidImm0_3:
5891 return Error(Loc, "immediate must be an integer in range [0, 3].");
5892 case Match_InvalidImm0_7:
5893 return Error(Loc, "immediate must be an integer in range [0, 7].");
5894 case Match_InvalidImm0_15:
5895 return Error(Loc, "immediate must be an integer in range [0, 15].");
5896 case Match_InvalidImm0_31:
5897 return Error(Loc, "immediate must be an integer in range [0, 31].");
5898 case Match_InvalidImm0_63:
5899 return Error(Loc, "immediate must be an integer in range [0, 63].");
5900 case Match_InvalidImm0_127:
5901 return Error(Loc, "immediate must be an integer in range [0, 127].");
5902 case Match_InvalidImm0_255:
5903 return Error(Loc, "immediate must be an integer in range [0, 255].");
5904 case Match_InvalidImm0_65535:
5905 return Error(Loc, "immediate must be an integer in range [0, 65535].");
5906 case Match_InvalidImm1_8:
5907 return Error(Loc, "immediate must be an integer in range [1, 8].");
5908 case Match_InvalidImm1_16:
5909 return Error(Loc, "immediate must be an integer in range [1, 16].");
5910 case Match_InvalidImm1_32:
5911 return Error(Loc, "immediate must be an integer in range [1, 32].");
5912 case Match_InvalidImm1_64:
5913 return Error(Loc, "immediate must be an integer in range [1, 64].");
5914 case Match_InvalidMemoryIndexedRange2UImm0:
5915 return Error(Loc, "vector select offset must be the immediate range 0:1.");
5916 case Match_InvalidMemoryIndexedRange2UImm1:
5917 return Error(Loc, "vector select offset must be an immediate range of the "
5918 "form <immf>:<imml>, where the first "
5919 "immediate is a multiple of 2 in the range [0, 2], and "
5920 "the second immediate is immf + 1.");
5921 case Match_InvalidMemoryIndexedRange2UImm2:
5922 case Match_InvalidMemoryIndexedRange2UImm3:
5923 return Error(
5924 Loc,
5925 "vector select offset must be an immediate range of the form "
5926 "<immf>:<imml>, "
5927 "where the first immediate is a multiple of 2 in the range [0, 6] or "
5928 "[0, 14] "
5929 "depending on the instruction, and the second immediate is immf + 1.");
5930 case Match_InvalidMemoryIndexedRange4UImm0:
5931 return Error(Loc, "vector select offset must be the immediate range 0:3.");
5932 case Match_InvalidMemoryIndexedRange4UImm1:
5933 case Match_InvalidMemoryIndexedRange4UImm2:
5934 return Error(
5935 Loc,
5936 "vector select offset must be an immediate range of the form "
5937 "<immf>:<imml>, "
5938 "where the first immediate is a multiple of 4 in the range [0, 4] or "
5939 "[0, 12] "
5940 "depending on the instruction, and the second immediate is immf + 3.");
5941 case Match_InvalidSVEAddSubImm8:
5942 return Error(Loc, "immediate must be an integer in range [0, 255]"
5943 " with a shift amount of 0");
5944 case Match_InvalidSVEAddSubImm16:
5945 case Match_InvalidSVEAddSubImm32:
5946 case Match_InvalidSVEAddSubImm64:
5947 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5948 "multiple of 256 in range [256, 65280]");
5949 case Match_InvalidSVECpyImm8:
5950 return Error(Loc, "immediate must be an integer in range [-128, 255]"
5951 " with a shift amount of 0");
5952 case Match_InvalidSVECpyImm16:
5953 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5954 "multiple of 256 in range [-32768, 65280]");
5955 case Match_InvalidSVECpyImm32:
5956 case Match_InvalidSVECpyImm64:
5957 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5958 "multiple of 256 in range [-32768, 32512]");
5959 case Match_InvalidIndexRange0_0:
5960 return Error(Loc, "expected lane specifier '[0]'");
5961 case Match_InvalidIndexRange1_1:
5962 return Error(Loc, "expected lane specifier '[1]'");
5963 case Match_InvalidIndexRange0_15:
5964 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5965 case Match_InvalidIndexRange0_7:
5966 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5967 case Match_InvalidIndexRange0_3:
5968 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5969 case Match_InvalidIndexRange0_1:
5970 return Error(Loc, "vector lane must be an integer in range [0, 1].");
5971 case Match_InvalidSVEIndexRange0_63:
5972 return Error(Loc, "vector lane must be an integer in range [0, 63].");
5973 case Match_InvalidSVEIndexRange0_31:
5974 return Error(Loc, "vector lane must be an integer in range [0, 31].");
5975 case Match_InvalidSVEIndexRange0_15:
5976 return Error(Loc, "vector lane must be an integer in range [0, 15].");
5977 case Match_InvalidSVEIndexRange0_7:
5978 return Error(Loc, "vector lane must be an integer in range [0, 7].");
5979 case Match_InvalidSVEIndexRange0_3:
5980 return Error(Loc, "vector lane must be an integer in range [0, 3].");
5981 case Match_InvalidLabel:
5982 return Error(Loc, "expected label or encodable integer pc offset");
5983 case Match_MRS:
5984 return Error(Loc, "expected readable system register");
5985 case Match_MSR:
5986 case Match_InvalidSVCR:
5987 return Error(Loc, "expected writable system register or pstate");
5988 case Match_InvalidComplexRotationEven:
5989 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5990 case Match_InvalidComplexRotationOdd:
5991 return Error(Loc, "complex rotation must be 90 or 270.");
5992 case Match_MnemonicFail: {
5993 std::string Suggestion = AArch64MnemonicSpellCheck(
5994 ((AArch64Operand &)*Operands[0]).getToken(),
5995 ComputeAvailableFeatures(STI->getFeatureBits()));
5996 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5997 }
5998 case Match_InvalidGPR64shifted8:
5999 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6000 case Match_InvalidGPR64shifted16:
6001 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6002 case Match_InvalidGPR64shifted32:
6003 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6004 case Match_InvalidGPR64shifted64:
6005 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6006 case Match_InvalidGPR64shifted128:
6007 return Error(
6008 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6009 case Match_InvalidGPR64NoXZRshifted8:
6010 return Error(Loc, "register must be x0..x30 without shift");
6011 case Match_InvalidGPR64NoXZRshifted16:
6012 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6013 case Match_InvalidGPR64NoXZRshifted32:
6014 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6015 case Match_InvalidGPR64NoXZRshifted64:
6016 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6017 case Match_InvalidGPR64NoXZRshifted128:
6018 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6019 case Match_InvalidZPR32UXTW8:
6020 case Match_InvalidZPR32SXTW8:
6021 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6022 case Match_InvalidZPR32UXTW16:
6023 case Match_InvalidZPR32SXTW16:
6024 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6025 case Match_InvalidZPR32UXTW32:
6026 case Match_InvalidZPR32SXTW32:
6027 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6028 case Match_InvalidZPR32UXTW64:
6029 case Match_InvalidZPR32SXTW64:
6030 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6031 case Match_InvalidZPR64UXTW8:
6032 case Match_InvalidZPR64SXTW8:
6033 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6034 case Match_InvalidZPR64UXTW16:
6035 case Match_InvalidZPR64SXTW16:
6036 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6037 case Match_InvalidZPR64UXTW32:
6038 case Match_InvalidZPR64SXTW32:
6039 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6040 case Match_InvalidZPR64UXTW64:
6041 case Match_InvalidZPR64SXTW64:
6042 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6043 case Match_InvalidZPR32LSL8:
6044 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6045 case Match_InvalidZPR32LSL16:
6046 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6047 case Match_InvalidZPR32LSL32:
6048 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6049 case Match_InvalidZPR32LSL64:
6050 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6051 case Match_InvalidZPR64LSL8:
6052 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6053 case Match_InvalidZPR64LSL16:
6054 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6055 case Match_InvalidZPR64LSL32:
6056 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6057 case Match_InvalidZPR64LSL64:
6058 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6059 case Match_InvalidZPR0:
6060 return Error(Loc, "expected register without element width suffix");
6061 case Match_InvalidZPR8:
6062 case Match_InvalidZPR16:
6063 case Match_InvalidZPR32:
6064 case Match_InvalidZPR64:
6065 case Match_InvalidZPR128:
6066 return Error(Loc, "invalid element width");
6067 case Match_InvalidZPR_3b8:
6068 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6069 case Match_InvalidZPR_3b16:
6070 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6071 case Match_InvalidZPR_3b32:
6072 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6073 case Match_InvalidZPR_4b8:
6074 return Error(Loc,
6075 "Invalid restricted vector register, expected z0.b..z15.b");
6076 case Match_InvalidZPR_4b16:
6077 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6078 case Match_InvalidZPR_4b32:
6079 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6080 case Match_InvalidZPR_4b64:
6081 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6082 case Match_InvalidSVEPattern:
6083 return Error(Loc, "invalid predicate pattern");
6084 case Match_InvalidSVEPPRorPNRAnyReg:
6085 case Match_InvalidSVEPPRorPNRBReg:
6086 case Match_InvalidSVEPredicateAnyReg:
6087 case Match_InvalidSVEPredicateBReg:
6088 case Match_InvalidSVEPredicateHReg:
6089 case Match_InvalidSVEPredicateSReg:
6090 case Match_InvalidSVEPredicateDReg:
6091 return Error(Loc, "invalid predicate register.");
6092 case Match_InvalidSVEPredicate3bAnyReg:
6093 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6094 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6095 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6096 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6097 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6098 return Error(Loc, "Invalid predicate register, expected PN in range "
6099 "pn8..pn15 with element suffix.");
6100 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6101 return Error(Loc, "invalid restricted predicate-as-counter register "
6102 "expected pn8..pn15");
6103 case Match_InvalidSVEPNPredicateBReg:
6104 case Match_InvalidSVEPNPredicateHReg:
6105 case Match_InvalidSVEPNPredicateSReg:
6106 case Match_InvalidSVEPNPredicateDReg:
6107 return Error(Loc, "Invalid predicate register, expected PN in range "
6108 "pn0..pn15 with element suffix.");
6109 case Match_InvalidSVEVecLenSpecifier:
6110 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6111 case Match_InvalidSVEPredicateListMul2x8:
6112 case Match_InvalidSVEPredicateListMul2x16:
6113 case Match_InvalidSVEPredicateListMul2x32:
6114 case Match_InvalidSVEPredicateListMul2x64:
6115 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6116 "predicate registers, where the first vector is a multiple of 2 "
6117 "and with correct element type");
6118 case Match_InvalidSVEExactFPImmOperandHalfOne:
6119 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6120 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6121 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6122 case Match_InvalidSVEExactFPImmOperandZeroOne:
6123 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6124 case Match_InvalidMatrixTileVectorH8:
6125 case Match_InvalidMatrixTileVectorV8:
6126 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6127 case Match_InvalidMatrixTileVectorH16:
6128 case Match_InvalidMatrixTileVectorV16:
6129 return Error(Loc,
6130 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6131 case Match_InvalidMatrixTileVectorH32:
6132 case Match_InvalidMatrixTileVectorV32:
6133 return Error(Loc,
6134 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6135 case Match_InvalidMatrixTileVectorH64:
6136 case Match_InvalidMatrixTileVectorV64:
6137 return Error(Loc,
6138 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6139 case Match_InvalidMatrixTileVectorH128:
6140 case Match_InvalidMatrixTileVectorV128:
6141 return Error(Loc,
6142 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6143 case Match_InvalidMatrixTile32:
6144 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6145 case Match_InvalidMatrixTile64:
6146 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6147 case Match_InvalidMatrix:
6148 return Error(Loc, "invalid matrix operand, expected za");
6149 case Match_InvalidMatrix8:
6150 return Error(Loc, "invalid matrix operand, expected suffix .b");
6151 case Match_InvalidMatrix16:
6152 return Error(Loc, "invalid matrix operand, expected suffix .h");
6153 case Match_InvalidMatrix32:
6154 return Error(Loc, "invalid matrix operand, expected suffix .s");
6155 case Match_InvalidMatrix64:
6156 return Error(Loc, "invalid matrix operand, expected suffix .d");
6157 case Match_InvalidMatrixIndexGPR32_12_15:
6158 return Error(Loc, "operand must be a register in range [w12, w15]");
6159 case Match_InvalidMatrixIndexGPR32_8_11:
6160 return Error(Loc, "operand must be a register in range [w8, w11]");
6161 case Match_InvalidSVEVectorListMul2x8:
6162 case Match_InvalidSVEVectorListMul2x16:
6163 case Match_InvalidSVEVectorListMul2x32:
6164 case Match_InvalidSVEVectorListMul2x64:
6165 case Match_InvalidSVEVectorListMul2x128:
6166 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6167 "SVE vectors, where the first vector is a multiple of 2 "
6168 "and with matching element types");
6169 case Match_InvalidSVEVectorListMul4x8:
6170 case Match_InvalidSVEVectorListMul4x16:
6171 case Match_InvalidSVEVectorListMul4x32:
6172 case Match_InvalidSVEVectorListMul4x64:
6173 case Match_InvalidSVEVectorListMul4x128:
6174 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6175 "SVE vectors, where the first vector is a multiple of 4 "
6176 "and with matching element types");
6177 case Match_InvalidLookupTable:
6178 return Error(Loc, "Invalid lookup table, expected zt0");
6179 case Match_InvalidSVEVectorListStrided2x8:
6180 case Match_InvalidSVEVectorListStrided2x16:
6181 case Match_InvalidSVEVectorListStrided2x32:
6182 case Match_InvalidSVEVectorListStrided2x64:
6183 return Error(
6184 Loc,
6185 "Invalid vector list, expected list with each SVE vector in the list "
6186 "8 registers apart, and the first register in the range [z0, z7] or "
6187 "[z16, z23] and with correct element type");
6188 case Match_InvalidSVEVectorListStrided4x8:
6189 case Match_InvalidSVEVectorListStrided4x16:
6190 case Match_InvalidSVEVectorListStrided4x32:
6191 case Match_InvalidSVEVectorListStrided4x64:
6192 return Error(
6193 Loc,
6194 "Invalid vector list, expected list with each SVE vector in the list "
6195 "4 registers apart, and the first register in the range [z0, z3] or "
6196 "[z16, z19] and with correct element type");
6197 case Match_AddSubLSLImm3ShiftLarge:
6198 return Error(Loc,
6199 "expected 'lsl' with optional integer in range [0, 7]");
6200 default:
6201 llvm_unreachable("unexpected error code!");
6202 }
6203 }
6204
6205 static const char *getSubtargetFeatureName(uint64_t Val);
6206
MatchAndEmitInstruction(SMLoc IDLoc,unsigned & Opcode,OperandVector & Operands,MCStreamer & Out,uint64_t & ErrorInfo,bool MatchingInlineAsm)6207 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6208 OperandVector &Operands,
6209 MCStreamer &Out,
6210 uint64_t &ErrorInfo,
6211 bool MatchingInlineAsm) {
6212 assert(!Operands.empty() && "Unexpect empty operand list!");
6213 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6214 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6215
6216 StringRef Tok = Op.getToken();
6217 unsigned NumOperands = Operands.size();
6218
6219 if (NumOperands == 4 && Tok == "lsl") {
6220 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6221 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6222 if (Op2.isScalarReg() && Op3.isImm()) {
6223 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6224 if (Op3CE) {
6225 uint64_t Op3Val = Op3CE->getValue();
6226 uint64_t NewOp3Val = 0;
6227 uint64_t NewOp4Val = 0;
6228 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6229 Op2.getReg())) {
6230 NewOp3Val = (32 - Op3Val) & 0x1f;
6231 NewOp4Val = 31 - Op3Val;
6232 } else {
6233 NewOp3Val = (64 - Op3Val) & 0x3f;
6234 NewOp4Val = 63 - Op3Val;
6235 }
6236
6237 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6238 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6239
6240 Operands[0] =
6241 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6242 Operands.push_back(AArch64Operand::CreateImm(
6243 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6244 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6245 Op3.getEndLoc(), getContext());
6246 }
6247 }
6248 } else if (NumOperands == 4 && Tok == "bfc") {
6249 // FIXME: Horrible hack to handle BFC->BFM alias.
6250 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6251 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6252 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6253
6254 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6255 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6256 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6257
6258 if (LSBCE && WidthCE) {
6259 uint64_t LSB = LSBCE->getValue();
6260 uint64_t Width = WidthCE->getValue();
6261
6262 uint64_t RegWidth = 0;
6263 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6264 Op1.getReg()))
6265 RegWidth = 64;
6266 else
6267 RegWidth = 32;
6268
6269 if (LSB >= RegWidth)
6270 return Error(LSBOp.getStartLoc(),
6271 "expected integer in range [0, 31]");
6272 if (Width < 1 || Width > RegWidth)
6273 return Error(WidthOp.getStartLoc(),
6274 "expected integer in range [1, 32]");
6275
6276 uint64_t ImmR = 0;
6277 if (RegWidth == 32)
6278 ImmR = (32 - LSB) & 0x1f;
6279 else
6280 ImmR = (64 - LSB) & 0x3f;
6281
6282 uint64_t ImmS = Width - 1;
6283
6284 if (ImmR != 0 && ImmS >= ImmR)
6285 return Error(WidthOp.getStartLoc(),
6286 "requested insert overflows register");
6287
6288 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6289 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6290 Operands[0] =
6291 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6292 Operands[2] = AArch64Operand::CreateReg(
6293 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6294 SMLoc(), SMLoc(), getContext());
6295 Operands[3] = AArch64Operand::CreateImm(
6296 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6297 Operands.emplace_back(
6298 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6299 WidthOp.getEndLoc(), getContext()));
6300 }
6301 }
6302 } else if (NumOperands == 5) {
6303 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6304 // UBFIZ -> UBFM aliases.
6305 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6306 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6307 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6308 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6309
6310 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6311 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6312 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6313
6314 if (Op3CE && Op4CE) {
6315 uint64_t Op3Val = Op3CE->getValue();
6316 uint64_t Op4Val = Op4CE->getValue();
6317
6318 uint64_t RegWidth = 0;
6319 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6320 Op1.getReg()))
6321 RegWidth = 64;
6322 else
6323 RegWidth = 32;
6324
6325 if (Op3Val >= RegWidth)
6326 return Error(Op3.getStartLoc(),
6327 "expected integer in range [0, 31]");
6328 if (Op4Val < 1 || Op4Val > RegWidth)
6329 return Error(Op4.getStartLoc(),
6330 "expected integer in range [1, 32]");
6331
6332 uint64_t NewOp3Val = 0;
6333 if (RegWidth == 32)
6334 NewOp3Val = (32 - Op3Val) & 0x1f;
6335 else
6336 NewOp3Val = (64 - Op3Val) & 0x3f;
6337
6338 uint64_t NewOp4Val = Op4Val - 1;
6339
6340 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6341 return Error(Op4.getStartLoc(),
6342 "requested insert overflows register");
6343
6344 const MCExpr *NewOp3 =
6345 MCConstantExpr::create(NewOp3Val, getContext());
6346 const MCExpr *NewOp4 =
6347 MCConstantExpr::create(NewOp4Val, getContext());
6348 Operands[3] = AArch64Operand::CreateImm(
6349 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6350 Operands[4] = AArch64Operand::CreateImm(
6351 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6352 if (Tok == "bfi")
6353 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6354 getContext());
6355 else if (Tok == "sbfiz")
6356 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6357 getContext());
6358 else if (Tok == "ubfiz")
6359 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6360 getContext());
6361 else
6362 llvm_unreachable("No valid mnemonic for alias?");
6363 }
6364 }
6365
6366 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6367 // UBFX -> UBFM aliases.
6368 } else if (NumOperands == 5 &&
6369 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6370 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6371 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6372 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6373
6374 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6375 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6376 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6377
6378 if (Op3CE && Op4CE) {
6379 uint64_t Op3Val = Op3CE->getValue();
6380 uint64_t Op4Val = Op4CE->getValue();
6381
6382 uint64_t RegWidth = 0;
6383 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6384 Op1.getReg()))
6385 RegWidth = 64;
6386 else
6387 RegWidth = 32;
6388
6389 if (Op3Val >= RegWidth)
6390 return Error(Op3.getStartLoc(),
6391 "expected integer in range [0, 31]");
6392 if (Op4Val < 1 || Op4Val > RegWidth)
6393 return Error(Op4.getStartLoc(),
6394 "expected integer in range [1, 32]");
6395
6396 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6397
6398 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6399 return Error(Op4.getStartLoc(),
6400 "requested extract overflows register");
6401
6402 const MCExpr *NewOp4 =
6403 MCConstantExpr::create(NewOp4Val, getContext());
6404 Operands[4] = AArch64Operand::CreateImm(
6405 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6406 if (Tok == "bfxil")
6407 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6408 getContext());
6409 else if (Tok == "sbfx")
6410 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6411 getContext());
6412 else if (Tok == "ubfx")
6413 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6414 getContext());
6415 else
6416 llvm_unreachable("No valid mnemonic for alias?");
6417 }
6418 }
6419 }
6420 }
6421
6422 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6423 // instruction for FP registers correctly in some rare circumstances. Convert
6424 // it to a safe instruction and warn (because silently changing someone's
6425 // assembly is rude).
6426 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6427 NumOperands == 4 && Tok == "movi") {
6428 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6429 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6430 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6431 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6432 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6433 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6434 if (Suffix.lower() == ".2d" &&
6435 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6436 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6437 " correctly on this CPU, converting to equivalent movi.16b");
6438 // Switch the suffix to .16b.
6439 unsigned Idx = Op1.isToken() ? 1 : 2;
6440 Operands[Idx] =
6441 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6442 }
6443 }
6444 }
6445
6446 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6447 // InstAlias can't quite handle this since the reg classes aren't
6448 // subclasses.
6449 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6450 // The source register can be Wn here, but the matcher expects a
6451 // GPR64. Twiddle it here if necessary.
6452 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6453 if (Op.isScalarReg()) {
6454 unsigned Reg = getXRegFromWReg(Op.getReg());
6455 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6456 Op.getStartLoc(), Op.getEndLoc(),
6457 getContext());
6458 }
6459 }
6460 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6461 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6462 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6463 if (Op.isScalarReg() &&
6464 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6465 Op.getReg())) {
6466 // The source register can be Wn here, but the matcher expects a
6467 // GPR64. Twiddle it here if necessary.
6468 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6469 if (Op.isScalarReg()) {
6470 unsigned Reg = getXRegFromWReg(Op.getReg());
6471 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6472 Op.getStartLoc(),
6473 Op.getEndLoc(), getContext());
6474 }
6475 }
6476 }
6477 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6478 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6479 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6480 if (Op.isScalarReg() &&
6481 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6482 Op.getReg())) {
6483 // The source register can be Wn here, but the matcher expects a
6484 // GPR32. Twiddle it here if necessary.
6485 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6486 if (Op.isScalarReg()) {
6487 unsigned Reg = getWRegFromXReg(Op.getReg());
6488 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6489 Op.getStartLoc(),
6490 Op.getEndLoc(), getContext());
6491 }
6492 }
6493 }
6494
6495 MCInst Inst;
6496 FeatureBitset MissingFeatures;
6497 // First try to match against the secondary set of tables containing the
6498 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6499 unsigned MatchResult =
6500 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6501 MatchingInlineAsm, 1);
6502
6503 // If that fails, try against the alternate table containing long-form NEON:
6504 // "fadd v0.2s, v1.2s, v2.2s"
6505 if (MatchResult != Match_Success) {
6506 // But first, save the short-form match result: we can use it in case the
6507 // long-form match also fails.
6508 auto ShortFormNEONErrorInfo = ErrorInfo;
6509 auto ShortFormNEONMatchResult = MatchResult;
6510 auto ShortFormNEONMissingFeatures = MissingFeatures;
6511
6512 MatchResult =
6513 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6514 MatchingInlineAsm, 0);
6515
6516 // Now, both matches failed, and the long-form match failed on the mnemonic
6517 // suffix token operand. The short-form match failure is probably more
6518 // relevant: use it instead.
6519 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6520 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6521 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6522 MatchResult = ShortFormNEONMatchResult;
6523 ErrorInfo = ShortFormNEONErrorInfo;
6524 MissingFeatures = ShortFormNEONMissingFeatures;
6525 }
6526 }
6527
6528 switch (MatchResult) {
6529 case Match_Success: {
6530 // Perform range checking and other semantic validations
6531 SmallVector<SMLoc, 8> OperandLocs;
6532 NumOperands = Operands.size();
6533 for (unsigned i = 1; i < NumOperands; ++i)
6534 OperandLocs.push_back(Operands[i]->getStartLoc());
6535 if (validateInstruction(Inst, IDLoc, OperandLocs))
6536 return true;
6537
6538 Inst.setLoc(IDLoc);
6539 Out.emitInstruction(Inst, getSTI());
6540 return false;
6541 }
6542 case Match_MissingFeature: {
6543 assert(MissingFeatures.any() && "Unknown missing feature!");
6544 // Special case the error message for the very common case where only
6545 // a single subtarget feature is missing (neon, e.g.).
6546 std::string Msg = "instruction requires:";
6547 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6548 if (MissingFeatures[i]) {
6549 Msg += " ";
6550 Msg += getSubtargetFeatureName(i);
6551 }
6552 }
6553 return Error(IDLoc, Msg);
6554 }
6555 case Match_MnemonicFail:
6556 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6557 case Match_InvalidOperand: {
6558 SMLoc ErrorLoc = IDLoc;
6559
6560 if (ErrorInfo != ~0ULL) {
6561 if (ErrorInfo >= Operands.size())
6562 return Error(IDLoc, "too few operands for instruction",
6563 SMRange(IDLoc, getTok().getLoc()));
6564
6565 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6566 if (ErrorLoc == SMLoc())
6567 ErrorLoc = IDLoc;
6568 }
6569 // If the match failed on a suffix token operand, tweak the diagnostic
6570 // accordingly.
6571 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6572 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6573 MatchResult = Match_InvalidSuffix;
6574
6575 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6576 }
6577 case Match_InvalidTiedOperand:
6578 case Match_InvalidMemoryIndexed1:
6579 case Match_InvalidMemoryIndexed2:
6580 case Match_InvalidMemoryIndexed4:
6581 case Match_InvalidMemoryIndexed8:
6582 case Match_InvalidMemoryIndexed16:
6583 case Match_InvalidCondCode:
6584 case Match_AddSubLSLImm3ShiftLarge:
6585 case Match_AddSubRegExtendSmall:
6586 case Match_AddSubRegExtendLarge:
6587 case Match_AddSubSecondSource:
6588 case Match_LogicalSecondSource:
6589 case Match_AddSubRegShift32:
6590 case Match_AddSubRegShift64:
6591 case Match_InvalidMovImm32Shift:
6592 case Match_InvalidMovImm64Shift:
6593 case Match_InvalidFPImm:
6594 case Match_InvalidMemoryWExtend8:
6595 case Match_InvalidMemoryWExtend16:
6596 case Match_InvalidMemoryWExtend32:
6597 case Match_InvalidMemoryWExtend64:
6598 case Match_InvalidMemoryWExtend128:
6599 case Match_InvalidMemoryXExtend8:
6600 case Match_InvalidMemoryXExtend16:
6601 case Match_InvalidMemoryXExtend32:
6602 case Match_InvalidMemoryXExtend64:
6603 case Match_InvalidMemoryXExtend128:
6604 case Match_InvalidMemoryIndexed1SImm4:
6605 case Match_InvalidMemoryIndexed2SImm4:
6606 case Match_InvalidMemoryIndexed3SImm4:
6607 case Match_InvalidMemoryIndexed4SImm4:
6608 case Match_InvalidMemoryIndexed1SImm6:
6609 case Match_InvalidMemoryIndexed16SImm4:
6610 case Match_InvalidMemoryIndexed32SImm4:
6611 case Match_InvalidMemoryIndexed4SImm7:
6612 case Match_InvalidMemoryIndexed8SImm7:
6613 case Match_InvalidMemoryIndexed16SImm7:
6614 case Match_InvalidMemoryIndexed8UImm5:
6615 case Match_InvalidMemoryIndexed8UImm3:
6616 case Match_InvalidMemoryIndexed4UImm5:
6617 case Match_InvalidMemoryIndexed2UImm5:
6618 case Match_InvalidMemoryIndexed1UImm6:
6619 case Match_InvalidMemoryIndexed2UImm6:
6620 case Match_InvalidMemoryIndexed4UImm6:
6621 case Match_InvalidMemoryIndexed8UImm6:
6622 case Match_InvalidMemoryIndexed16UImm6:
6623 case Match_InvalidMemoryIndexedSImm6:
6624 case Match_InvalidMemoryIndexedSImm5:
6625 case Match_InvalidMemoryIndexedSImm8:
6626 case Match_InvalidMemoryIndexedSImm9:
6627 case Match_InvalidMemoryIndexed16SImm9:
6628 case Match_InvalidMemoryIndexed8SImm10:
6629 case Match_InvalidImm0_0:
6630 case Match_InvalidImm0_1:
6631 case Match_InvalidImm0_3:
6632 case Match_InvalidImm0_7:
6633 case Match_InvalidImm0_15:
6634 case Match_InvalidImm0_31:
6635 case Match_InvalidImm0_63:
6636 case Match_InvalidImm0_127:
6637 case Match_InvalidImm0_255:
6638 case Match_InvalidImm0_65535:
6639 case Match_InvalidImm1_8:
6640 case Match_InvalidImm1_16:
6641 case Match_InvalidImm1_32:
6642 case Match_InvalidImm1_64:
6643 case Match_InvalidMemoryIndexedRange2UImm0:
6644 case Match_InvalidMemoryIndexedRange2UImm1:
6645 case Match_InvalidMemoryIndexedRange2UImm2:
6646 case Match_InvalidMemoryIndexedRange2UImm3:
6647 case Match_InvalidMemoryIndexedRange4UImm0:
6648 case Match_InvalidMemoryIndexedRange4UImm1:
6649 case Match_InvalidMemoryIndexedRange4UImm2:
6650 case Match_InvalidSVEAddSubImm8:
6651 case Match_InvalidSVEAddSubImm16:
6652 case Match_InvalidSVEAddSubImm32:
6653 case Match_InvalidSVEAddSubImm64:
6654 case Match_InvalidSVECpyImm8:
6655 case Match_InvalidSVECpyImm16:
6656 case Match_InvalidSVECpyImm32:
6657 case Match_InvalidSVECpyImm64:
6658 case Match_InvalidIndexRange0_0:
6659 case Match_InvalidIndexRange1_1:
6660 case Match_InvalidIndexRange0_15:
6661 case Match_InvalidIndexRange0_7:
6662 case Match_InvalidIndexRange0_3:
6663 case Match_InvalidIndexRange0_1:
6664 case Match_InvalidSVEIndexRange0_63:
6665 case Match_InvalidSVEIndexRange0_31:
6666 case Match_InvalidSVEIndexRange0_15:
6667 case Match_InvalidSVEIndexRange0_7:
6668 case Match_InvalidSVEIndexRange0_3:
6669 case Match_InvalidLabel:
6670 case Match_InvalidComplexRotationEven:
6671 case Match_InvalidComplexRotationOdd:
6672 case Match_InvalidGPR64shifted8:
6673 case Match_InvalidGPR64shifted16:
6674 case Match_InvalidGPR64shifted32:
6675 case Match_InvalidGPR64shifted64:
6676 case Match_InvalidGPR64shifted128:
6677 case Match_InvalidGPR64NoXZRshifted8:
6678 case Match_InvalidGPR64NoXZRshifted16:
6679 case Match_InvalidGPR64NoXZRshifted32:
6680 case Match_InvalidGPR64NoXZRshifted64:
6681 case Match_InvalidGPR64NoXZRshifted128:
6682 case Match_InvalidZPR32UXTW8:
6683 case Match_InvalidZPR32UXTW16:
6684 case Match_InvalidZPR32UXTW32:
6685 case Match_InvalidZPR32UXTW64:
6686 case Match_InvalidZPR32SXTW8:
6687 case Match_InvalidZPR32SXTW16:
6688 case Match_InvalidZPR32SXTW32:
6689 case Match_InvalidZPR32SXTW64:
6690 case Match_InvalidZPR64UXTW8:
6691 case Match_InvalidZPR64SXTW8:
6692 case Match_InvalidZPR64UXTW16:
6693 case Match_InvalidZPR64SXTW16:
6694 case Match_InvalidZPR64UXTW32:
6695 case Match_InvalidZPR64SXTW32:
6696 case Match_InvalidZPR64UXTW64:
6697 case Match_InvalidZPR64SXTW64:
6698 case Match_InvalidZPR32LSL8:
6699 case Match_InvalidZPR32LSL16:
6700 case Match_InvalidZPR32LSL32:
6701 case Match_InvalidZPR32LSL64:
6702 case Match_InvalidZPR64LSL8:
6703 case Match_InvalidZPR64LSL16:
6704 case Match_InvalidZPR64LSL32:
6705 case Match_InvalidZPR64LSL64:
6706 case Match_InvalidZPR0:
6707 case Match_InvalidZPR8:
6708 case Match_InvalidZPR16:
6709 case Match_InvalidZPR32:
6710 case Match_InvalidZPR64:
6711 case Match_InvalidZPR128:
6712 case Match_InvalidZPR_3b8:
6713 case Match_InvalidZPR_3b16:
6714 case Match_InvalidZPR_3b32:
6715 case Match_InvalidZPR_4b8:
6716 case Match_InvalidZPR_4b16:
6717 case Match_InvalidZPR_4b32:
6718 case Match_InvalidZPR_4b64:
6719 case Match_InvalidSVEPPRorPNRAnyReg:
6720 case Match_InvalidSVEPPRorPNRBReg:
6721 case Match_InvalidSVEPredicateAnyReg:
6722 case Match_InvalidSVEPattern:
6723 case Match_InvalidSVEVecLenSpecifier:
6724 case Match_InvalidSVEPredicateBReg:
6725 case Match_InvalidSVEPredicateHReg:
6726 case Match_InvalidSVEPredicateSReg:
6727 case Match_InvalidSVEPredicateDReg:
6728 case Match_InvalidSVEPredicate3bAnyReg:
6729 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6730 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6731 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6732 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6733 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6734 case Match_InvalidSVEPNPredicateBReg:
6735 case Match_InvalidSVEPNPredicateHReg:
6736 case Match_InvalidSVEPNPredicateSReg:
6737 case Match_InvalidSVEPNPredicateDReg:
6738 case Match_InvalidSVEPredicateListMul2x8:
6739 case Match_InvalidSVEPredicateListMul2x16:
6740 case Match_InvalidSVEPredicateListMul2x32:
6741 case Match_InvalidSVEPredicateListMul2x64:
6742 case Match_InvalidSVEExactFPImmOperandHalfOne:
6743 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6744 case Match_InvalidSVEExactFPImmOperandZeroOne:
6745 case Match_InvalidMatrixTile32:
6746 case Match_InvalidMatrixTile64:
6747 case Match_InvalidMatrix:
6748 case Match_InvalidMatrix8:
6749 case Match_InvalidMatrix16:
6750 case Match_InvalidMatrix32:
6751 case Match_InvalidMatrix64:
6752 case Match_InvalidMatrixTileVectorH8:
6753 case Match_InvalidMatrixTileVectorH16:
6754 case Match_InvalidMatrixTileVectorH32:
6755 case Match_InvalidMatrixTileVectorH64:
6756 case Match_InvalidMatrixTileVectorH128:
6757 case Match_InvalidMatrixTileVectorV8:
6758 case Match_InvalidMatrixTileVectorV16:
6759 case Match_InvalidMatrixTileVectorV32:
6760 case Match_InvalidMatrixTileVectorV64:
6761 case Match_InvalidMatrixTileVectorV128:
6762 case Match_InvalidSVCR:
6763 case Match_InvalidMatrixIndexGPR32_12_15:
6764 case Match_InvalidMatrixIndexGPR32_8_11:
6765 case Match_InvalidLookupTable:
6766 case Match_InvalidSVEVectorListMul2x8:
6767 case Match_InvalidSVEVectorListMul2x16:
6768 case Match_InvalidSVEVectorListMul2x32:
6769 case Match_InvalidSVEVectorListMul2x64:
6770 case Match_InvalidSVEVectorListMul2x128:
6771 case Match_InvalidSVEVectorListMul4x8:
6772 case Match_InvalidSVEVectorListMul4x16:
6773 case Match_InvalidSVEVectorListMul4x32:
6774 case Match_InvalidSVEVectorListMul4x64:
6775 case Match_InvalidSVEVectorListMul4x128:
6776 case Match_InvalidSVEVectorListStrided2x8:
6777 case Match_InvalidSVEVectorListStrided2x16:
6778 case Match_InvalidSVEVectorListStrided2x32:
6779 case Match_InvalidSVEVectorListStrided2x64:
6780 case Match_InvalidSVEVectorListStrided4x8:
6781 case Match_InvalidSVEVectorListStrided4x16:
6782 case Match_InvalidSVEVectorListStrided4x32:
6783 case Match_InvalidSVEVectorListStrided4x64:
6784 case Match_MSR:
6785 case Match_MRS: {
6786 if (ErrorInfo >= Operands.size())
6787 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6788 // Any time we get here, there's nothing fancy to do. Just get the
6789 // operand SMLoc and display the diagnostic.
6790 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6791 if (ErrorLoc == SMLoc())
6792 ErrorLoc = IDLoc;
6793 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6794 }
6795 }
6796
6797 llvm_unreachable("Implement any new match types added!");
6798 }
6799
6800 /// ParseDirective parses the arm specific directives
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // Directive availability depends on the object file container: MachO gets
  // the LOH directives, COFF gets the .seh_* unwind directives.
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive names are matched case-insensitively.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Return values of the individual parseDirective* handlers are deliberately
  // ignored here: they report their own diagnostics. Returning true from this
  // function means "directive not recognized by this target".
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  else if (IsMachO) {
    // Linker optimization hints are MachO-only.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  } else if (IsCOFF) {
    // Windows ARM64 structured exception handling (SEH) unwind directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The two flags select paired-register and writeback variants.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else
      return true;
  } else
    return true;
  return false;
}
6902
ExpandCryptoAEK(const AArch64::ArchInfo & ArchInfo,SmallVector<StringRef,4> & RequestedExtensions)6903 static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
6904 SmallVector<StringRef, 4> &RequestedExtensions) {
6905 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
6906 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
6907
6908 if (!NoCrypto && Crypto) {
6909 // Map 'generic' (and others) to sha2 and aes, because
6910 // that was the traditional meaning of crypto.
6911 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6912 ArchInfo == AArch64::ARMV8_3A) {
6913 RequestedExtensions.push_back("sha2");
6914 RequestedExtensions.push_back("aes");
6915 }
6916 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6917 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6918 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6919 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6920 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6921 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
6922 RequestedExtensions.push_back("sm4");
6923 RequestedExtensions.push_back("sha3");
6924 RequestedExtensions.push_back("sha2");
6925 RequestedExtensions.push_back("aes");
6926 }
6927 } else if (NoCrypto) {
6928 // Map 'generic' (and others) to sha2 and aes, because
6929 // that was the traditional meaning of crypto.
6930 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6931 ArchInfo == AArch64::ARMV8_3A) {
6932 RequestedExtensions.push_back("nosha2");
6933 RequestedExtensions.push_back("noaes");
6934 }
6935 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6936 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6937 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6938 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6939 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6940 ArchInfo == AArch64::ARMV9_4A) {
6941 RequestedExtensions.push_back("nosm4");
6942 RequestedExtensions.push_back("nosha3");
6943 RequestedExtensions.push_back("nosha2");
6944 RequestedExtensions.push_back("noaes");
6945 }
6946 }
6947 }
6948
6949 /// parseDirectiveArch
6950 /// ::= .arch token
parseDirectiveArch(SMLoc L)6951 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
6952 SMLoc ArchLoc = getLoc();
6953
6954 StringRef Arch, ExtensionString;
6955 std::tie(Arch, ExtensionString) =
6956 getParser().parseStringToEndOfStatement().trim().split('+');
6957
6958 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
6959 if (!ArchInfo)
6960 return Error(ArchLoc, "unknown arch name");
6961
6962 if (parseToken(AsmToken::EndOfStatement))
6963 return true;
6964
6965 // Get the architecture and extension features.
6966 std::vector<StringRef> AArch64Features;
6967 AArch64Features.push_back(ArchInfo->ArchFeature);
6968 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
6969
6970 MCSubtargetInfo &STI = copySTI();
6971 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
6972 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
6973 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
6974
6975 SmallVector<StringRef, 4> RequestedExtensions;
6976 if (!ExtensionString.empty())
6977 ExtensionString.split(RequestedExtensions, '+');
6978
6979 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
6980
6981 FeatureBitset Features = STI.getFeatureBits();
6982 setAvailableFeatures(ComputeAvailableFeatures(Features));
6983 for (auto Name : RequestedExtensions) {
6984 bool EnableFeature = !Name.consume_front_insensitive("no");
6985
6986 for (const auto &Extension : ExtensionMap) {
6987 if (Extension.Name != Name)
6988 continue;
6989
6990 if (Extension.Features.none())
6991 report_fatal_error("unsupported architectural extension: " + Name);
6992
6993 FeatureBitset ToggleFeatures =
6994 EnableFeature
6995 ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
6996 : STI.ToggleFeature(Features & Extension.Features);
6997 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6998 break;
6999 }
7000 }
7001 return false;
7002 }
7003
7004 /// parseDirectiveArchExtension
7005 /// ::= .arch_extension [no]feature
parseDirectiveArchExtension(SMLoc L)7006 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7007 SMLoc ExtLoc = getLoc();
7008
7009 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7010
7011 if (parseEOL())
7012 return true;
7013
7014 bool EnableFeature = true;
7015 if (Name.starts_with_insensitive("no")) {
7016 EnableFeature = false;
7017 Name = Name.substr(2);
7018 }
7019
7020 MCSubtargetInfo &STI = copySTI();
7021 FeatureBitset Features = STI.getFeatureBits();
7022 for (const auto &Extension : ExtensionMap) {
7023 if (Extension.Name != Name)
7024 continue;
7025
7026 if (Extension.Features.none())
7027 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7028
7029 FeatureBitset ToggleFeatures =
7030 EnableFeature
7031 ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
7032 : STI.ToggleFeature(Features & Extension.Features);
7033 setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
7034 return false;
7035 }
7036
7037 return Error(ExtLoc, "unknown architectural extension: " + Name);
7038 }
7039
incrementLoc(SMLoc L,int Offset)7040 static SMLoc incrementLoc(SMLoc L, int Offset) {
7041 return SMLoc::getFromPointer(L.getPointer() + Offset);
7042 }
7043
7044 /// parseDirectiveCPU
7045 /// ::= .cpu id
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  // The statement has the shape "<cpu>[+ext[+ext...]]".
  StringRef CPU, ExtensionString;
  std::tie(CPU, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
  if (!CpuArch) {
    // Emit the diagnostic but return false: the directive itself was parsed
    // to end-of-statement, so assembly can continue past it.
    Error(CurLoc, "unknown CPU name");
    return false;
  }
  // Translate the legacy "[no]crypto" spelling into concrete features.
  ExpandCryptoAEK(*CpuArch, RequestedExtensions);

  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
  // CurLoc tracks the position inside the original statement text so that
  // per-extension diagnostics point at the offending "+ext" token.
  CurLoc = incrementLoc(CurLoc, CPU.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(CurLoc, 1);

    // A leading "no" (case-insensitive) disables the extension.
    bool EnableFeature = !Name.consume_front_insensitive("no");

    bool FoundExtension = false;
    for (const auto &Extension : ExtensionMap) {
      if (Extension.Name != Name)
        continue;

      // NOTE(review): here an empty feature set is a fatal error, while
      // parseDirectiveArchExtension reports a recoverable Error for the same
      // condition — confirm whether the asymmetry is intended.
      if (Extension.Features.none())
        report_fatal_error("unsupported architectural extension: " + Name);

      // Feature bits are re-read for every extension so each toggle sees the
      // effect of the previous ones.
      FeatureBitset Features = STI.getFeatureBits();
      FeatureBitset ToggleFeatures =
          EnableFeature
              ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
              : STI.ToggleFeature(Features & Extension.Features);
      setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
      FoundExtension = true;

      break;
    }

    if (!FoundExtension)
      Error(CurLoc, "unsupported architectural extension");

    CurLoc = incrementLoc(CurLoc, Name.size());
  }
  return false;
}
7103
7104 /// parseDirectiveInst
7105 /// ::= .inst opcode [, ...]
parseDirectiveInst(SMLoc Loc)7106 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7107 if (getLexer().is(AsmToken::EndOfStatement))
7108 return Error(Loc, "expected expression following '.inst' directive");
7109
7110 auto parseOp = [&]() -> bool {
7111 SMLoc L = getLoc();
7112 const MCExpr *Expr = nullptr;
7113 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7114 return true;
7115 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7116 if (check(!Value, L, "expected constant expression"))
7117 return true;
7118 getTargetStreamer().emitInst(Value->getValue());
7119 return false;
7120 };
7121
7122 return parseMany(parseOp);
7123 }
7124
7125 // parseDirectiveTLSDescCall:
7126 // ::= .tlsdesccall symbol
parseDirectiveTLSDescCall(SMLoc L)7127 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7128 StringRef Name;
7129 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7130 parseToken(AsmToken::EndOfStatement))
7131 return true;
7132
7133 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7134 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7135 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
7136
7137 MCInst Inst;
7138 Inst.setOpcode(AArch64::TLSDESCCALL);
7139 Inst.addOperand(MCOperand::createExpr(Expr));
7140
7141 getParser().getStreamer().emitInstruction(Inst, getSTI());
7142 return false;
7143 }
7144
7145 /// ::= .loh <lohName | lohId> label1, ..., labelN
7146 /// The number of arguments depends on the loh identifier.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  MCLOHType Kind;
  if (getTok().isNot(AsmToken::Identifier)) {
    if (getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getTok().getIntVal();
    // NOTE(review): `Id <= -1U` compares an int64_t with unsigned int; -1U
    // converts to 4294967295, so the bound admits any Id up to UINT32_MAX and
    // the effective validation is isValidMCLOHType — confirm the intended
    // range check.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Parse exactly NbArgs comma-separated label arguments.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));

    // No comma after the final argument.
    if (Idx + 1 == NbArgs)
      break;
    if (parseComma())
      return true;
  }
  if (parseEOL())
    return true;

  getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
  return false;
}
7193
7194 /// parseDirectiveLtorg
7195 /// ::= .ltorg | .pool
parseDirectiveLtorg(SMLoc L)7196 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7197 if (parseEOL())
7198 return true;
7199 getTargetStreamer().emitCurrentConstantPool();
7200 return false;
7201 }
7202
7203 /// parseDirectiveReq
7204 /// ::= name .req registername
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  // Try each register class in turn: scalar, NEON vector, SVE data vector,
  // then SVE predicate. The order matters: a later attempt is made only if
  // the previous one returned "no match" (as opposed to a hard failure).
  RegKind RegisterKind = RegKind::Scalar;
  MCRegister RegNum;
  ParseStatus ParseRes = tryParseScalarRegister(RegNum);

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::NeonVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);

    // A hard failure means a diagnostic was already emitted; give up.
    if (ParseRes.isFailure())
      return true;

    // A type suffix (e.g. ".4s") is not allowed on a .req alias.
    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(SRegLoc, "vector register without type specifier expected");
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEDataVector;
    ParseRes =
        tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(SRegLoc,
                   "sve vector register without type specifier expected");
  }

  if (!ParseRes.isSuccess()) {
    StringRef Kind;
    RegisterKind = RegKind::SVEPredicateVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);

    if (ParseRes.isFailure())
      return true;

    if (ParseRes.isSuccess() && !Kind.empty())
      return Error(SRegLoc,
                   "sve predicate register without type specifier expected");
  }

  if (!ParseRes.isSuccess())
    return Error(SRegLoc, "register name or alias expected");

  // Shouldn't be anything else.
  if (parseEOL())
    return true;

  // Record the alias; if it already exists with a different target, keep the
  // original mapping and warn rather than redefining.
  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");

  return false;
}
7264
7265 /// parseDirectiveUneq
7266 /// ::= .unreq registername
parseDirectiveUnreq(SMLoc L)7267 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7268 if (getTok().isNot(AsmToken::Identifier))
7269 return TokError("unexpected input in .unreq directive.");
7270 RegisterReqs.erase(getTok().getIdentifier().lower());
7271 Lex(); // Eat the identifier.
7272 return parseToken(AsmToken::EndOfStatement);
7273 }
7274
parseDirectiveCFINegateRAState()7275 bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7276 if (parseEOL())
7277 return true;
7278 getStreamer().emitCFINegateRAState();
7279 return false;
7280 }
7281
7282 /// parseDirectiveCFIBKeyFrame
7283 /// ::= .cfi_b_key
parseDirectiveCFIBKeyFrame()7284 bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7285 if (parseEOL())
7286 return true;
7287 getStreamer().emitCFIBKeyFrame();
7288 return false;
7289 }
7290
7291 /// parseDirectiveCFIMTETaggedFrame
7292 /// ::= .cfi_mte_tagged_frame
parseDirectiveCFIMTETaggedFrame()7293 bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7294 if (parseEOL())
7295 return true;
7296 getStreamer().emitCFIMTETaggedFrame();
7297 return false;
7298 }
7299
7300 /// parseDirectiveVariantPCS
7301 /// ::= .variant_pcs symbolname
parseDirectiveVariantPCS(SMLoc L)7302 bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7303 StringRef Name;
7304 if (getParser().parseIdentifier(Name))
7305 return TokError("expected symbol name");
7306 if (parseEOL())
7307 return true;
7308 getTargetStreamer().emitDirectiveVariantPCS(
7309 getContext().getOrCreateSymbol(Name));
7310 return false;
7311 }
7312
7313 /// parseDirectiveSEHAllocStack
7314 /// ::= .seh_stackalloc
parseDirectiveSEHAllocStack(SMLoc L)7315 bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7316 int64_t Size;
7317 if (parseImmExpr(Size))
7318 return true;
7319 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7320 return false;
7321 }
7322
7323 /// parseDirectiveSEHPrologEnd
7324 /// ::= .seh_endprologue
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  // No operands: just record the end of the prologue in the unwind info.
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7329
7330 /// parseDirectiveSEHSaveR19R20X
7331 /// ::= .seh_save_r19r20_x
parseDirectiveSEHSaveR19R20X(SMLoc L)7332 bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7333 int64_t Offset;
7334 if (parseImmExpr(Offset))
7335 return true;
7336 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7337 return false;
7338 }
7339
7340 /// parseDirectiveSEHSaveFPLR
7341 /// ::= .seh_save_fplr
parseDirectiveSEHSaveFPLR(SMLoc L)7342 bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7343 int64_t Offset;
7344 if (parseImmExpr(Offset))
7345 return true;
7346 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7347 return false;
7348 }
7349
7350 /// parseDirectiveSEHSaveFPLRX
7351 /// ::= .seh_save_fplr_x
parseDirectiveSEHSaveFPLRX(SMLoc L)7352 bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7353 int64_t Offset;
7354 if (parseImmExpr(Offset))
7355 return true;
7356 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7357 return false;
7358 }
7359
7360 /// parseDirectiveSEHSaveReg
7361 /// ::= .seh_save_reg
parseDirectiveSEHSaveReg(SMLoc L)7362 bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7363 unsigned Reg;
7364 int64_t Offset;
7365 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7366 parseComma() || parseImmExpr(Offset))
7367 return true;
7368 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7369 return false;
7370 }
7371
7372 /// parseDirectiveSEHSaveRegX
7373 /// ::= .seh_save_reg_x
parseDirectiveSEHSaveRegX(SMLoc L)7374 bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7375 unsigned Reg;
7376 int64_t Offset;
7377 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7378 parseComma() || parseImmExpr(Offset))
7379 return true;
7380 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7381 return false;
7382 }
7383
7384 /// parseDirectiveSEHSaveRegP
7385 /// ::= .seh_save_regp
parseDirectiveSEHSaveRegP(SMLoc L)7386 bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7387 unsigned Reg;
7388 int64_t Offset;
7389 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7390 parseComma() || parseImmExpr(Offset))
7391 return true;
7392 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7393 return false;
7394 }
7395
7396 /// parseDirectiveSEHSaveRegPX
7397 /// ::= .seh_save_regp_x
parseDirectiveSEHSaveRegPX(SMLoc L)7398 bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7399 unsigned Reg;
7400 int64_t Offset;
7401 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7402 parseComma() || parseImmExpr(Offset))
7403 return true;
7404 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7405 return false;
7406 }
7407
7408 /// parseDirectiveSEHSaveLRPair
7409 /// ::= .seh_save_lrpair
parseDirectiveSEHSaveLRPair(SMLoc L)7410 bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7411 unsigned Reg;
7412 int64_t Offset;
7413 L = getLoc();
7414 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7415 parseComma() || parseImmExpr(Offset))
7416 return true;
7417 if (check(((Reg - 19) % 2 != 0), L,
7418 "expected register with even offset from x19"))
7419 return true;
7420 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7421 return false;
7422 }
7423
7424 /// parseDirectiveSEHSaveFReg
7425 /// ::= .seh_save_freg
parseDirectiveSEHSaveFReg(SMLoc L)7426 bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7427 unsigned Reg;
7428 int64_t Offset;
7429 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7430 parseComma() || parseImmExpr(Offset))
7431 return true;
7432 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7433 return false;
7434 }
7435
7436 /// parseDirectiveSEHSaveFRegX
7437 /// ::= .seh_save_freg_x
parseDirectiveSEHSaveFRegX(SMLoc L)7438 bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7439 unsigned Reg;
7440 int64_t Offset;
7441 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7442 parseComma() || parseImmExpr(Offset))
7443 return true;
7444 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7445 return false;
7446 }
7447
7448 /// parseDirectiveSEHSaveFRegP
7449 /// ::= .seh_save_fregp
parseDirectiveSEHSaveFRegP(SMLoc L)7450 bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7451 unsigned Reg;
7452 int64_t Offset;
7453 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7454 parseComma() || parseImmExpr(Offset))
7455 return true;
7456 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7457 return false;
7458 }
7459
7460 /// parseDirectiveSEHSaveFRegPX
7461 /// ::= .seh_save_fregp_x
parseDirectiveSEHSaveFRegPX(SMLoc L)7462 bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7463 unsigned Reg;
7464 int64_t Offset;
7465 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7466 parseComma() || parseImmExpr(Offset))
7467 return true;
7468 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7469 return false;
7470 }
7471
7472 /// parseDirectiveSEHSetFP
7473 /// ::= .seh_set_fp
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  // No operands: record that the frame pointer is established here.
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7478
7479 /// parseDirectiveSEHAddFP
7480 /// ::= .seh_add_fp
parseDirectiveSEHAddFP(SMLoc L)7481 bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7482 int64_t Size;
7483 if (parseImmExpr(Size))
7484 return true;
7485 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7486 return false;
7487 }
7488
7489 /// parseDirectiveSEHNop
7490 /// ::= .seh_nop
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  // No operands: record a no-op unwind code for the current instruction.
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7495
7496 /// parseDirectiveSEHSaveNext
7497 /// ::= .seh_save_next
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  // No operands: record a "save next register pair" unwind code.
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7502
7503 /// parseDirectiveSEHEpilogStart
7504 /// ::= .seh_startepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  // No operands: mark the start of an epilogue in the unwind info.
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7509
7510 /// parseDirectiveSEHEpilogEnd
7511 /// ::= .seh_endepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  // No operands: mark the end of the current epilogue in the unwind info.
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
7516
7517 /// parseDirectiveSEHTrapFrame
7518 /// ::= .seh_trap_frame
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  // No operands: record a trap-frame unwind code.
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
7523
7524 /// parseDirectiveSEHMachineFrame
7525 /// ::= .seh_pushframe
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  // No operands: record a machine-frame unwind code.
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
7530
7531 /// parseDirectiveSEHContext
7532 /// ::= .seh_context
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  // No operands: record a context unwind code.
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
7537
7538 /// parseDirectiveSEHECContext
7539 /// ::= .seh_ec_context
bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  // No operands: record an EC (emulation-compatible) context unwind code.
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}
7544
7545 /// parseDirectiveSEHClearUnwoundToCall
7546 /// ::= .seh_clear_unwound_to_call
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  // No operands: record a clear-unwound-to-call unwind code.
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
7551
7552 /// parseDirectiveSEHPACSignLR
7553 /// ::= .seh_pac_sign_lr
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  // No operands: record that lr is PAC-signed at this point.
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
7558
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
///
/// Parses "<reg>, <offset>" and emits the matching Windows ARM64
/// save_any_reg unwind opcode. \p Paired selects the *_p variants (register
/// pair), \p Writeback the *_x variants (pre/post-indexed writeback).
/// Returns true on a parse/validation error, false on success.
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  // Operand syntax is a register followed by a comma and an immediate offset.
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // General-purpose register: offset must be non-negative and aligned to
    // 16 for paired/writeback forms, otherwise to 8.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    // FP encodes as x29 and LR as x30; the rest use their x-register index.
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // A pair saves (reg, reg+1); lr (x30) has no successor to pair with.
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit FP/SIMD register: same alignment rule as the GPR case.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      // d31 has no successor to pair with.
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector register: offset must always be 16-byte aligned.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      // q31 has no successor to pair with.
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}
7636
bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  // Try @AUTH expressions: they're more complex than the usual symbol variants.
  // parseAuthExpr returns false on success; only when it does not produce an
  // expression do we defer to the generic primary-expression parser.
  if (!parseAuthExpr(Res, EndLoc))
    return false;
  return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
}
7643
/// parseAuthExpr
/// ::= _sym@AUTH(ib,123[,addr])
/// ::= (_sym + 5)@AUTH(ib,123[,addr])
/// ::= (_sym - 5)@AUTH(ib,123[,addr])
///
/// Recognizes pointer-authentication expressions and builds an
/// AArch64AuthMCExpr in \p Res with the key, 16-bit discriminator, and
/// optional address diversity. Returns false on success; true when the input
/// is not an @AUTH expression (callers may then fall back) or on a hard
/// syntax error after "@AUTH" has been committed.
bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  MCContext &Ctx = getContext();

  AsmToken Tok = Parser.getTok();

  // Look for '_sym@AUTH' ...
  if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
    // The lexer fused "@AUTH" into the identifier; strip it to get the symbol.
    StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
    if (SymName.contains('@'))
      return TokError(
          "combination of @AUTH with other modifiers not supported");
    Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);

    Parser.Lex(); // Eat the identifier.
  } else {
    // ... or look for a more complex symbol reference, such as ...
    SmallVector<AsmToken, 6> Tokens;

    // ... '"_long sym"@AUTH' ...
    if (Tok.is(AsmToken::String))
      Tokens.resize(2);
    // ... or '(_sym + 5)@AUTH'.
    else if (Tok.is(AsmToken::LParen))
      Tokens.resize(6);
    else
      return true;

    // Lookahead without consuming: if fewer tokens are available than we
    // asked for, this cannot be an @AUTH form.
    if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
      return true;

    // In either case, the expression ends with '@' 'AUTH'.
    if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
        Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
        Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
      return true;

    if (Tok.is(AsmToken::String)) {
      // Quoted symbol name.
      StringRef SymName;
      if (Parser.parseIdentifier(SymName))
        return true;
      Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
    } else {
      // Parenthesized sub-expression, e.g. (_sym + 5).
      if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
        return true;
    }

    Parser.Lex(); // '@'
    Parser.Lex(); // 'AUTH'
  }

  // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
  if (parseToken(AsmToken::LParen, "expected '('"))
    return true;

  if (Parser.getTok().isNot(AsmToken::Identifier))
    return TokError("expected key name");

  // Key name, e.g. "ia"/"ib"/"da"/"db", validated by the PAC key table.
  StringRef KeyStr = Parser.getTok().getIdentifier();
  auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
  if (!KeyIDOrNone)
    return TokError("invalid key '" + KeyStr + "'");
  Parser.Lex();

  if (parseToken(AsmToken::Comma, "expected ','"))
    return true;

  if (Parser.getTok().isNot(AsmToken::Integer))
    return TokError("expected integer discriminator");
  int64_t Discriminator = Parser.getTok().getIntVal();

  // Discriminators are encoded in 16 bits.
  if (!isUInt<16>(Discriminator))
    return TokError("integer discriminator " + Twine(Discriminator) +
                    " out of range [0, 0xFFFF]");
  Parser.Lex();

  // Optional trailing ",addr" enables address diversity.
  bool UseAddressDiversity = false;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::Identifier) ||
        Parser.getTok().getIdentifier() != "addr")
      return TokError("expected 'addr'");
    UseAddressDiversity = true;
    Parser.Lex();
  }

  EndLoc = Parser.getTok().getEndLoc();
  if (parseToken(AsmToken::RParen, "expected ')'"))
    return true;

  // Wrap the symbol (sub-)expression with the authentication parameters.
  Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
                                  UseAddressDiversity, Ctx);
  return false;
}
7742
7743 bool
classifySymbolRef(const MCExpr * Expr,AArch64MCExpr::VariantKind & ELFRefKind,MCSymbolRefExpr::VariantKind & DarwinRefKind,int64_t & Addend)7744 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
7745 AArch64MCExpr::VariantKind &ELFRefKind,
7746 MCSymbolRefExpr::VariantKind &DarwinRefKind,
7747 int64_t &Addend) {
7748 ELFRefKind = AArch64MCExpr::VK_INVALID;
7749 DarwinRefKind = MCSymbolRefExpr::VK_None;
7750 Addend = 0;
7751
7752 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
7753 ELFRefKind = AE->getKind();
7754 Expr = AE->getSubExpr();
7755 }
7756
7757 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
7758 if (SE) {
7759 // It's a simple symbol reference with no addend.
7760 DarwinRefKind = SE->getKind();
7761 return true;
7762 }
7763
7764 // Check that it looks like a symbol + an addend
7765 MCValue Res;
7766 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
7767 if (!Relocatable || Res.getSymB())
7768 return false;
7769
7770 // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
7771 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
7772 if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
7773 return false;
7774
7775 if (Res.getSymA())
7776 DarwinRefKind = Res.getSymA()->getKind();
7777 Addend = Res.getConstant();
7778
7779 // It's some symbol reference + a constant addend, but really
7780 // shouldn't use both Darwin and ELF syntax.
7781 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
7782 DarwinRefKind == MCSymbolRefExpr::VK_None;
7783 }
7784
7785 /// Force static initialization.
LLVMInitializeAArch64AsmParser()7786 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
7787 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
7788 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
7789 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
7790 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
7791 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
7792 }
7793
7794 #define GET_REGISTER_MATCHER
7795 #define GET_SUBTARGET_FEATURE_NAME
7796 #define GET_MATCHER_IMPLEMENTATION
7797 #define GET_MNEMONIC_SPELL_CHECKER
7798 #include "AArch64GenAsmMatcher.inc"
7799
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
//
// Custom validation hook invoked by the generated matcher: decides whether
// operand \p AsmOp can satisfy the token-like match class \p Kind. Returns
// Match_Success or Match_InvalidOperand.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // Helper: succeed only if Op is a constant immediate equal to ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual("za"))
      return Match_Success;
    return Match_InvalidOperand;

  // If the kind is a token for a literal immediate, check if our asm operand
  // matches. This is for InstAliases which have a fixed-value immediate in
  // the asm string, such as hints which are parsed into a specific
  // instruction definition.
#define MATCH_HASH(N)                                                          \
  case MCK__HASH_##N:                                                          \
    return MatchesOpImmediate(N);
    MATCH_HASH(0)
    MATCH_HASH(1)
    MATCH_HASH(2)
    MATCH_HASH(3)
    MATCH_HASH(4)
    MATCH_HASH(6)
    MATCH_HASH(7)
    MATCH_HASH(8)
    MATCH_HASH(10)
    MATCH_HASH(12)
    MATCH_HASH(14)
    MATCH_HASH(16)
    MATCH_HASH(24)
    MATCH_HASH(25)
    MATCH_HASH(26)
    MATCH_HASH(27)
    MATCH_HASH(28)
    MATCH_HASH(29)
    MATCH_HASH(30)
    MATCH_HASH(31)
    MATCH_HASH(32)
    MATCH_HASH(40)
    MATCH_HASH(48)
    MATCH_HASH(64)
#undef MATCH_HASH
  // Same idea for literal negative immediates (e.g. "#-8" in an alias).
#define MATCH_HASH_MINUS(N)                                                    \
  case MCK__HASH__MINUS_##N:                                                   \
    return MatchesOpImmediate(-N);
    MATCH_HASH_MINUS(4)
    MATCH_HASH_MINUS(8)
    MATCH_HASH_MINUS(16)
#undef MATCH_HASH_MINUS
  }
}
7870
tryParseGPRSeqPair(OperandVector & Operands)7871 ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
7872
7873 SMLoc S = getLoc();
7874
7875 if (getTok().isNot(AsmToken::Identifier))
7876 return Error(S, "expected register");
7877
7878 MCRegister FirstReg;
7879 ParseStatus Res = tryParseScalarRegister(FirstReg);
7880 if (!Res.isSuccess())
7881 return Error(S, "expected first even register of a consecutive same-size "
7882 "even/odd register pair");
7883
7884 const MCRegisterClass &WRegClass =
7885 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
7886 const MCRegisterClass &XRegClass =
7887 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
7888
7889 bool isXReg = XRegClass.contains(FirstReg),
7890 isWReg = WRegClass.contains(FirstReg);
7891 if (!isXReg && !isWReg)
7892 return Error(S, "expected first even register of a consecutive same-size "
7893 "even/odd register pair");
7894
7895 const MCRegisterInfo *RI = getContext().getRegisterInfo();
7896 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
7897
7898 if (FirstEncoding & 0x1)
7899 return Error(S, "expected first even register of a consecutive same-size "
7900 "even/odd register pair");
7901
7902 if (getTok().isNot(AsmToken::Comma))
7903 return Error(getLoc(), "expected comma");
7904 // Eat the comma
7905 Lex();
7906
7907 SMLoc E = getLoc();
7908 MCRegister SecondReg;
7909 Res = tryParseScalarRegister(SecondReg);
7910 if (!Res.isSuccess())
7911 return Error(E, "expected second odd register of a consecutive same-size "
7912 "even/odd register pair");
7913
7914 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
7915 (isXReg && !XRegClass.contains(SecondReg)) ||
7916 (isWReg && !WRegClass.contains(SecondReg)))
7917 return Error(E, "expected second odd register of a consecutive same-size "
7918 "even/odd register pair");
7919
7920 unsigned Pair = 0;
7921 if (isXReg) {
7922 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
7923 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
7924 } else {
7925 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
7926 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
7927 }
7928
7929 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
7930 getLoc(), getContext()));
7931
7932 return ParseStatus::Success;
7933 }
7934
7935 template <bool ParseShiftExtend, bool ParseSuffix>
tryParseSVEDataVector(OperandVector & Operands)7936 ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
7937 const SMLoc S = getLoc();
7938 // Check for a SVE vector register specifier first.
7939 MCRegister RegNum;
7940 StringRef Kind;
7941
7942 ParseStatus Res =
7943 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7944
7945 if (!Res.isSuccess())
7946 return Res;
7947
7948 if (ParseSuffix && Kind.empty())
7949 return ParseStatus::NoMatch;
7950
7951 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
7952 if (!KindRes)
7953 return ParseStatus::NoMatch;
7954
7955 unsigned ElementWidth = KindRes->second;
7956
7957 // No shift/extend is the default.
7958 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
7959 Operands.push_back(AArch64Operand::CreateVectorReg(
7960 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
7961
7962 ParseStatus Res = tryParseVectorIndex(Operands);
7963 if (Res.isFailure())
7964 return ParseStatus::Failure;
7965 return ParseStatus::Success;
7966 }
7967
7968 // Eat the comma
7969 Lex();
7970
7971 // Match the shift
7972 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
7973 Res = tryParseOptionalShiftExtend(ExtOpnd);
7974 if (!Res.isSuccess())
7975 return Res;
7976
7977 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
7978 Operands.push_back(AArch64Operand::CreateVectorReg(
7979 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
7980 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
7981 Ext->hasShiftExtendAmount()));
7982
7983 return ParseStatus::Success;
7984 }
7985
tryParseSVEPattern(OperandVector & Operands)7986 ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
7987 MCAsmParser &Parser = getParser();
7988
7989 SMLoc SS = getLoc();
7990 const AsmToken &TokE = getTok();
7991 bool IsHash = TokE.is(AsmToken::Hash);
7992
7993 if (!IsHash && TokE.isNot(AsmToken::Identifier))
7994 return ParseStatus::NoMatch;
7995
7996 int64_t Pattern;
7997 if (IsHash) {
7998 Lex(); // Eat hash
7999
8000 // Parse the immediate operand.
8001 const MCExpr *ImmVal;
8002 SS = getLoc();
8003 if (Parser.parseExpression(ImmVal))
8004 return ParseStatus::Failure;
8005
8006 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8007 if (!MCE)
8008 return TokError("invalid operand for instruction");
8009
8010 Pattern = MCE->getValue();
8011 } else {
8012 // Parse the pattern
8013 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8014 if (!Pat)
8015 return ParseStatus::NoMatch;
8016
8017 Lex();
8018 Pattern = Pat->Encoding;
8019 assert(Pattern >= 0 && Pattern < 32);
8020 }
8021
8022 Operands.push_back(
8023 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8024 SS, getLoc(), getContext()));
8025
8026 return ParseStatus::Success;
8027 }
8028
8029 ParseStatus
tryParseSVEVecLenSpecifier(OperandVector & Operands)8030 AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8031 int64_t Pattern;
8032 SMLoc SS = getLoc();
8033 const AsmToken &TokE = getTok();
8034 // Parse the pattern
8035 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8036 TokE.getString());
8037 if (!Pat)
8038 return ParseStatus::NoMatch;
8039
8040 Lex();
8041 Pattern = Pat->Encoding;
8042 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8043
8044 Operands.push_back(
8045 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8046 SS, getLoc(), getContext()));
8047
8048 return ParseStatus::Success;
8049 }
8050
tryParseGPR64x8(OperandVector & Operands)8051 ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8052 SMLoc SS = getLoc();
8053
8054 MCRegister XReg;
8055 if (!tryParseScalarRegister(XReg).isSuccess())
8056 return ParseStatus::NoMatch;
8057
8058 MCContext &ctx = getContext();
8059 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8060 int X8Reg = RI->getMatchingSuperReg(
8061 XReg, AArch64::x8sub_0,
8062 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8063 if (!X8Reg)
8064 return Error(SS,
8065 "expected an even-numbered x-register in the range [x0,x22]");
8066
8067 Operands.push_back(
8068 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8069 return ParseStatus::Success;
8070 }
8071
tryParseImmRange(OperandVector & Operands)8072 ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8073 SMLoc S = getLoc();
8074
8075 if (getTok().isNot(AsmToken::Integer))
8076 return ParseStatus::NoMatch;
8077
8078 if (getLexer().peekTok().isNot(AsmToken::Colon))
8079 return ParseStatus::NoMatch;
8080
8081 const MCExpr *ImmF;
8082 if (getParser().parseExpression(ImmF))
8083 return ParseStatus::NoMatch;
8084
8085 if (getTok().isNot(AsmToken::Colon))
8086 return ParseStatus::NoMatch;
8087
8088 Lex(); // Eat ':'
8089 if (getTok().isNot(AsmToken::Integer))
8090 return ParseStatus::NoMatch;
8091
8092 SMLoc E = getTok().getLoc();
8093 const MCExpr *ImmL;
8094 if (getParser().parseExpression(ImmL))
8095 return ParseStatus::NoMatch;
8096
8097 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8098 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8099
8100 Operands.push_back(
8101 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8102 return ParseStatus::Success;
8103 }
8104