//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64TargetStreamer.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "AArch64InstrInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector
};

enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};

class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
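  // e.g. after 'foo .req x4' (an illustrative alias name), an operand
  // written 'foo' parses as the scalar register X4.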
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  class PrefixInfo {
  public:
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = Options.getABIName() == "ilp32";
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction operand.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // Describes whether the parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
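    // e.g. 'sy' is 0xf, while unnamed options such as '#4' can only be
    // written numerically.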
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct ExtendOp {
    unsigned Val;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around, as the MCExprs may need to be manipulated
  // during the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  APFloat getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }

  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }

  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }

  template <int Bits, int Scale>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    int64_t Val = MCE->getValue();
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  DiagnosticPredicate isSVEPattern() const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;
    auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;
    int64_t Val = MCE->getValue();
    if (Val >= 0 && Val < 32)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when
      // using @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an
      // addend.
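      // e.g. 'ldr x0, [x0, _var@GOTPAGEOFF]' is accepted, but
      // '_var@GOTPAGEOFF+8' is rejected by the addend check below.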
      return Addend == 0;
    }

    return false;
  }

  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }

  template <int N, int M>
  bool isImmInRange() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= N && Val <= M);
  }

  // NOTE: Also used for isLogicalImmNot as anything that can be represented
  // as a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Avoid left shift by 64 directly.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }

  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
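    // e.g. 'add x0, x1, #4096' is valid because it can be materialized as
    // '#1, lsl #12' via getShiftedVal<12>(), while '#4097' fits neither
    // shift.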
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                            DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }

  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
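  // e.g. 'add z0.h, z0.h, #512' is representable as '#2, lsl #8'.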
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  template <int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) &&
            Val <= (((1 << (N - 1)) - 1) << 2));
  }

  bool
  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    if (!isImm())
      return false;

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
                                             DarwinRefKind, Addend)) {
      return false;
    }
    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
      return false;

    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
      if (ELFRefKind == AllowedModifiers[i])
        return true;
    }

    return false;
  }

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }

  template <int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand.
    return !Shift && E;
  }

  template <int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }

  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupported register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  template <int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
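  /// e.g. the '{ v0, v1 }' in 'st1.8b { v0, v1 }, [x0]', where the element
  /// type comes from the mnemonic suffix rather than the list itself.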
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }

  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }

  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
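    // e.g. 'add x0, x1, w2, sxtw' extends the 32-bit W2, so SXTW/UXTW (and
    // the byte/halfword forms) qualify, but SXTX/UXTX/LSL do not.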
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  template <int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR, or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }

  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }

  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector halfword shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift of 8 or 16.
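    // e.g. the 'msl #8' in 'movi v0.4s, #0xff, msl #8'.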
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }

  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template <int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }

  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = -(1LL << (21 - 1));
      int64_t Max = (1LL << (21 - 1)) - 1;
      return Val >= Min && Val <= Max;
    }

    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
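    // e.g. for ':lo12:var+8' the expression is emitted unchanged here and
    // the fixup later reduces the addend modulo the page size.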
    addExpr(Inst, getImm());
  }

  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }

  template <int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }

  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }

  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::createImm(getPrefetch())); 1722 } 1723 1724 void addPSBHintOperands(MCInst &Inst, unsigned N) const { 1725 assert(N == 1 && "Invalid number of operands!"); 1726 Inst.addOperand(MCOperand::createImm(getPSBHint())); 1727 } 1728 1729 void addBTIHintOperands(MCInst &Inst, unsigned N) const { 1730 assert(N == 1 && "Invalid number of operands!"); 1731 Inst.addOperand(MCOperand::createImm(getBTIHint())); 1732 } 1733 1734 void addShifterOperands(MCInst &Inst, unsigned N) const { 1735 assert(N == 1 && "Invalid number of operands!"); 1736 unsigned Imm = 1737 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount()); 1738 Inst.addOperand(MCOperand::createImm(Imm)); 1739 } 1740 1741 void addExtendOperands(MCInst &Inst, unsigned N) const { 1742 assert(N == 1 && "Invalid number of operands!"); 1743 AArch64_AM::ShiftExtendType ET = getShiftExtendType(); 1744 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW; 1745 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); 1746 Inst.addOperand(MCOperand::createImm(Imm)); 1747 } 1748 1749 void addExtend64Operands(MCInst &Inst, unsigned N) const { 1750 assert(N == 1 && "Invalid number of operands!"); 1751 AArch64_AM::ShiftExtendType ET = getShiftExtendType(); 1752 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX; 1753 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount()); 1754 Inst.addOperand(MCOperand::createImm(Imm)); 1755 } 1756 1757 void addMemExtendOperands(MCInst &Inst, unsigned N) const { 1758 assert(N == 2 && "Invalid number of operands!"); 1759 AArch64_AM::ShiftExtendType ET = getShiftExtendType(); 1760 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; 1761 Inst.addOperand(MCOperand::createImm(IsSigned)); 1762 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0)); 1763 } 1764 1765 // For 8-bit load/store instructions with a register offset, both the 1766 // "DoShift" and "NoShift" variants have a shift of 0. Because of this, 1767 // they're disambiguated by whether the shift was explicit or implicit rather 1768 // than its size. 
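// As an illustrative pair (not taken from this file): "ldrb w0, [x1, x2, lsl #0]"
// selects the "DoShift" variant because the shift is spelled out, while
// "ldrb w0, [x1, x2]" selects "NoShift"; hasShiftExtendAmount() below records
// exactly that distinction.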
1769 void addMemExtend8Operands(MCInst &Inst, unsigned N) const { 1770 assert(N == 2 && "Invalid number of operands!"); 1771 AArch64_AM::ShiftExtendType ET = getShiftExtendType(); 1772 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX; 1773 Inst.addOperand(MCOperand::createImm(IsSigned)); 1774 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount())); 1775 } 1776 1777 template<int Shift> 1778 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const { 1779 assert(N == 1 && "Invalid number of operands!"); 1780 1781 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1782 if (CE) { 1783 uint64_t Value = CE->getValue(); 1784 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff)); 1785 } else { 1786 addExpr(Inst, getImm()); 1787 } 1788 } 1789 1790 template<int Shift> 1791 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const { 1792 assert(N == 1 && "Invalid number of operands!"); 1793 1794 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm()); 1795 uint64_t Value = CE->getValue(); 1796 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff)); 1797 } 1798 1799 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const { 1800 assert(N == 1 && "Invalid number of operands!"); 1801 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); 1802 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90)); 1803 } 1804 1805 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const { 1806 assert(N == 1 && "Invalid number of operands!"); 1807 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm()); 1808 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180)); 1809 } 1810 1811 void print(raw_ostream &OS) const override; 1812 1813 static std::unique_ptr<AArch64Operand> 1814 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) { 1815 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx); 1816 Op->Tok.Data = Str.data(); 1817 Op->Tok.Length = Str.size(); 1818 Op->Tok.IsSuffix = IsSuffix; 1819 Op->StartLoc = S; 1820 Op->EndLoc = S; 1821 return Op; 1822 } 1823 1824 static std::unique_ptr<AArch64Operand> 1825 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx, 1826 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg, 1827 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, 1828 unsigned ShiftAmount = 0, 1829 unsigned HasExplicitAmount = false) { 1830 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx); 1831 Op->Reg.RegNum = RegNum; 1832 Op->Reg.Kind = Kind; 1833 Op->Reg.ElementWidth = 0; 1834 Op->Reg.EqualityTy = EqTy; 1835 Op->Reg.ShiftExtend.Type = ExtTy; 1836 Op->Reg.ShiftExtend.Amount = ShiftAmount; 1837 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount; 1838 Op->StartLoc = S; 1839 Op->EndLoc = E; 1840 return Op; 1841 } 1842 1843 static std::unique_ptr<AArch64Operand> 1844 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth, 1845 SMLoc S, SMLoc E, MCContext &Ctx, 1846 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL, 1847 unsigned ShiftAmount = 0, 1848 unsigned HasExplicitAmount = false) { 1849 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector || 1850 Kind == RegKind::SVEPredicateVector) && 1851 "Invalid vector kind"); 1852 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount, 1853 HasExplicitAmount); 1854 Op->Reg.ElementWidth = ElementWidth; 1855 return Op; 1856 } 1857 1858 static std::unique_ptr<AArch64Operand> 1859 CreateVectorList(unsigned RegNum, unsigned Count, unsigned 
NumElements, 1860 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E, 1861 MCContext &Ctx) { 1862 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx); 1863 Op->VectorList.RegNum = RegNum; 1864 Op->VectorList.Count = Count; 1865 Op->VectorList.NumElements = NumElements; 1866 Op->VectorList.ElementWidth = ElementWidth; 1867 Op->VectorList.RegisterKind = RegisterKind; 1868 Op->StartLoc = S; 1869 Op->EndLoc = E; 1870 return Op; 1871 } 1872 1873 static std::unique_ptr<AArch64Operand> 1874 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) { 1875 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx); 1876 Op->VectorIndex.Val = Idx; 1877 Op->StartLoc = S; 1878 Op->EndLoc = E; 1879 return Op; 1880 } 1881 1882 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S, 1883 SMLoc E, MCContext &Ctx) { 1884 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx); 1885 Op->Imm.Val = Val; 1886 Op->StartLoc = S; 1887 Op->EndLoc = E; 1888 return Op; 1889 } 1890 1891 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val, 1892 unsigned ShiftAmount, 1893 SMLoc S, SMLoc E, 1894 MCContext &Ctx) { 1895 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx); 1896 Op->ShiftedImm.Val = Val; 1897 Op->ShiftedImm.ShiftAmount = ShiftAmount; 1898 Op->StartLoc = S; 1899 Op->EndLoc = E; 1900 return Op; 1901 } 1902 1903 static std::unique_ptr<AArch64Operand> 1904 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) { 1905 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx); 1906 Op->CondCode.Code = Code; 1907 Op->StartLoc = S; 1908 Op->EndLoc = E; 1909 return Op; 1910 } 1911 1912 static std::unique_ptr<AArch64Operand> 1913 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) { 1914 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx); 1915 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue(); 1916 Op->FPImm.IsExact = IsExact; 1917 Op->StartLoc = S; 1918 Op->EndLoc = S; 1919 return Op; 1920 } 1921 1922 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, 1923 StringRef Str, 1924 SMLoc S, 1925 MCContext &Ctx) { 1926 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx); 1927 Op->Barrier.Val = Val; 1928 Op->Barrier.Data = Str.data(); 1929 Op->Barrier.Length = Str.size(); 1930 Op->StartLoc = S; 1931 Op->EndLoc = S; 1932 return Op; 1933 } 1934 1935 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S, 1936 uint32_t MRSReg, 1937 uint32_t MSRReg, 1938 uint32_t PStateField, 1939 MCContext &Ctx) { 1940 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx); 1941 Op->SysReg.Data = Str.data(); 1942 Op->SysReg.Length = Str.size(); 1943 Op->SysReg.MRSReg = MRSReg; 1944 Op->SysReg.MSRReg = MSRReg; 1945 Op->SysReg.PStateField = PStateField; 1946 Op->StartLoc = S; 1947 Op->EndLoc = S; 1948 return Op; 1949 } 1950 1951 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S, 1952 SMLoc E, MCContext &Ctx) { 1953 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx); 1954 Op->SysCRImm.Val = Val; 1955 Op->StartLoc = S; 1956 Op->EndLoc = E; 1957 return Op; 1958 } 1959 1960 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, 1961 StringRef Str, 1962 SMLoc S, 1963 MCContext &Ctx) { 1964 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx); 1965 Op->Prefetch.Val = Val; 1966 Op->Prefetch.Data = Str.data(); 1967 Op->Prefetch.Length = Str.size(); 1968 Op->StartLoc = S; 1969 Op->EndLoc = S; 1970 return Op; 1971 } 1972 1973 static
std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val, 1974 StringRef Str, 1975 SMLoc S, 1976 MCContext &Ctx) { 1977 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx); 1978 Op->PSBHint.Val = Val; 1979 Op->PSBHint.Data = Str.data(); 1980 Op->PSBHint.Length = Str.size(); 1981 Op->StartLoc = S; 1982 Op->EndLoc = S; 1983 return Op; 1984 } 1985 1986 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val, 1987 StringRef Str, 1988 SMLoc S, 1989 MCContext &Ctx) { 1990 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx); // BTI variants are aliases of HINT #32|(op << 1), so fold the full HINT immediate here. 1991 Op->BTIHint.Val = Val << 1 | 32; 1992 Op->BTIHint.Data = Str.data(); 1993 Op->BTIHint.Length = Str.size(); 1994 Op->StartLoc = S; 1995 Op->EndLoc = S; 1996 return Op; 1997 } 1998 1999 static std::unique_ptr<AArch64Operand> 2000 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val, 2001 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) { 2002 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx); 2003 Op->ShiftExtend.Type = ShOp; 2004 Op->ShiftExtend.Amount = Val; 2005 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount; 2006 Op->StartLoc = S; 2007 Op->EndLoc = E; 2008 return Op; 2009 } 2010 }; 2011 2012 } // end anonymous namespace. 2013 2014 void AArch64Operand::print(raw_ostream &OS) const { 2015 switch (Kind) { 2016 case k_FPImm: 2017 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue(); 2018 if (!getFPImmIsExact()) 2019 OS << " (inexact)"; 2020 OS << ">"; 2021 break; 2022 case k_Barrier: { 2023 StringRef Name = getBarrierName(); 2024 if (!Name.empty()) 2025 OS << "<barrier " << Name << ">"; 2026 else 2027 OS << "<barrier invalid #" << getBarrier() << ">"; 2028 break; 2029 } 2030 case k_Immediate: 2031 OS << *getImm(); 2032 break; 2033 case k_ShiftedImm: { 2034 unsigned Shift = getShiftedImmShift(); 2035 OS << "<shiftedimm "; 2036 OS << *getShiftedImmVal(); 2037 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">"; 2038 break; 2039 } 2040 case k_CondCode: 2041 OS << "<condcode " << getCondCode() << ">"; 2042 break; 2043 case k_VectorList: { 2044 OS << "<vectorlist "; 2045 unsigned Reg = getVectorListStart(); 2046 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i) 2047 OS << Reg + i << " "; 2048 OS << ">"; 2049 break; 2050 } 2051 case k_VectorIndex: 2052 OS << "<vectorindex " << getVectorIndex() << ">"; 2053 break; 2054 case k_SysReg: 2055 OS << "<sysreg: " << getSysReg() << '>'; 2056 break; 2057 case k_Token: 2058 OS << "'" << getToken() << "'"; 2059 break; 2060 case k_SysCR: 2061 OS << "c" << getSysCR(); 2062 break; 2063 case k_Prefetch: { 2064 StringRef Name = getPrefetchName(); 2065 if (!Name.empty()) 2066 OS << "<prfop " << Name << ">"; 2067 else 2068 OS << "<prfop invalid #" << getPrefetch() << ">"; 2069 break; 2070 } 2071 case k_PSBHint: 2072 OS << getPSBHintName(); 2073 break; 2074 case k_BTIHint: 2075 OS << getBTIHintName(); 2076 break; 2077 case k_Register: 2078 OS << "<register " << getReg() << ">"; 2079 if (!getShiftExtendAmount() && !hasShiftExtendAmount()) 2080 break; 2081 LLVM_FALLTHROUGH; 2082 case k_ShiftExtend: 2083 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #" 2084 << getShiftExtendAmount(); 2085 if (!hasShiftExtendAmount()) 2086 OS << "<imp>"; 2087 OS << '>'; 2088 break; 2089 } 2090 } 2091 2092 /// @name Auto-generated Match Functions 2093 /// { 2094 2095 static unsigned MatchRegisterName(StringRef Name); 2096 2097 /// } 2098 2099 static unsigned MatchNeonVectorRegName(StringRef Name) { 2100 return StringSwitch<unsigned>(Name.lower()) 2101
.Case("v0", AArch64::Q0) 2102 .Case("v1", AArch64::Q1) 2103 .Case("v2", AArch64::Q2) 2104 .Case("v3", AArch64::Q3) 2105 .Case("v4", AArch64::Q4) 2106 .Case("v5", AArch64::Q5) 2107 .Case("v6", AArch64::Q6) 2108 .Case("v7", AArch64::Q7) 2109 .Case("v8", AArch64::Q8) 2110 .Case("v9", AArch64::Q9) 2111 .Case("v10", AArch64::Q10) 2112 .Case("v11", AArch64::Q11) 2113 .Case("v12", AArch64::Q12) 2114 .Case("v13", AArch64::Q13) 2115 .Case("v14", AArch64::Q14) 2116 .Case("v15", AArch64::Q15) 2117 .Case("v16", AArch64::Q16) 2118 .Case("v17", AArch64::Q17) 2119 .Case("v18", AArch64::Q18) 2120 .Case("v19", AArch64::Q19) 2121 .Case("v20", AArch64::Q20) 2122 .Case("v21", AArch64::Q21) 2123 .Case("v22", AArch64::Q22) 2124 .Case("v23", AArch64::Q23) 2125 .Case("v24", AArch64::Q24) 2126 .Case("v25", AArch64::Q25) 2127 .Case("v26", AArch64::Q26) 2128 .Case("v27", AArch64::Q27) 2129 .Case("v28", AArch64::Q28) 2130 .Case("v29", AArch64::Q29) 2131 .Case("v30", AArch64::Q30) 2132 .Case("v31", AArch64::Q31) 2133 .Default(0); 2134 } 2135 2136 /// Returns an optional pair of (#elements, element-width) if Suffix 2137 /// is a valid vector kind. Where the number of elements in a vector 2138 /// or the vector width is implicit or explicitly unknown (but still a 2139 /// valid suffix kind), 0 is used. 2140 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix, 2141 RegKind VectorKind) { 2142 std::pair<int, int> Res = {-1, -1}; 2143 2144 switch (VectorKind) { 2145 case RegKind::NeonVector: 2146 Res = 2147 StringSwitch<std::pair<int, int>>(Suffix.lower()) 2148 .Case("", {0, 0}) 2149 .Case(".1d", {1, 64}) 2150 .Case(".1q", {1, 128}) 2151 // '.2h' needed for fp16 scalar pairwise reductions 2152 .Case(".2h", {2, 16}) 2153 .Case(".2s", {2, 32}) 2154 .Case(".2d", {2, 64}) 2155 // '.4b' is another special case for the ARMv8.2a dot product 2156 // operand 2157 .Case(".4b", {4, 8}) 2158 .Case(".4h", {4, 16}) 2159 .Case(".4s", {4, 32}) 2160 .Case(".8b", {8, 8}) 2161 .Case(".8h", {8, 16}) 2162 .Case(".16b", {16, 8}) 2163 // Accept the width neutral ones, too, for verbose syntax. If those 2164 // aren't used in the right places, the token operand won't match so 2165 // all will work out. 
2166 .Case(".b", {0, 8}) 2167 .Case(".h", {0, 16}) 2168 .Case(".s", {0, 32}) 2169 .Case(".d", {0, 64}) 2170 .Default({-1, -1}); 2171 break; 2172 case RegKind::SVEPredicateVector: 2173 case RegKind::SVEDataVector: 2174 Res = StringSwitch<std::pair<int, int>>(Suffix.lower()) 2175 .Case("", {0, 0}) 2176 .Case(".b", {0, 8}) 2177 .Case(".h", {0, 16}) 2178 .Case(".s", {0, 32}) 2179 .Case(".d", {0, 64}) 2180 .Case(".q", {0, 128}) 2181 .Default({-1, -1}); 2182 break; 2183 default: 2184 llvm_unreachable("Unsupported RegKind"); 2185 } 2186 2187 if (Res == std::make_pair(-1, -1)) 2188 return Optional<std::pair<int, int>>(); 2189 2190 return Optional<std::pair<int, int>>(Res); 2191 } 2192 2193 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) { 2194 return parseVectorKind(Suffix, VectorKind).hasValue(); 2195 } 2196 2197 static unsigned matchSVEDataVectorRegName(StringRef Name) { 2198 return StringSwitch<unsigned>(Name.lower()) 2199 .Case("z0", AArch64::Z0) 2200 .Case("z1", AArch64::Z1) 2201 .Case("z2", AArch64::Z2) 2202 .Case("z3", AArch64::Z3) 2203 .Case("z4", AArch64::Z4) 2204 .Case("z5", AArch64::Z5) 2205 .Case("z6", AArch64::Z6) 2206 .Case("z7", AArch64::Z7) 2207 .Case("z8", AArch64::Z8) 2208 .Case("z9", AArch64::Z9) 2209 .Case("z10", AArch64::Z10) 2210 .Case("z11", AArch64::Z11) 2211 .Case("z12", AArch64::Z12) 2212 .Case("z13", AArch64::Z13) 2213 .Case("z14", AArch64::Z14) 2214 .Case("z15", AArch64::Z15) 2215 .Case("z16", AArch64::Z16) 2216 .Case("z17", AArch64::Z17) 2217 .Case("z18", AArch64::Z18) 2218 .Case("z19", AArch64::Z19) 2219 .Case("z20", AArch64::Z20) 2220 .Case("z21", AArch64::Z21) 2221 .Case("z22", AArch64::Z22) 2222 .Case("z23", AArch64::Z23) 2223 .Case("z24", AArch64::Z24) 2224 .Case("z25", AArch64::Z25) 2225 .Case("z26", AArch64::Z26) 2226 .Case("z27", AArch64::Z27) 2227 .Case("z28", AArch64::Z28) 2228 .Case("z29", AArch64::Z29) 2229 .Case("z30", AArch64::Z30) 2230 .Case("z31", AArch64::Z31) 2231 .Default(0); 2232 } 2233 2234 static unsigned matchSVEPredicateVectorRegName(StringRef Name) { 2235 return StringSwitch<unsigned>(Name.lower()) 2236 .Case("p0", AArch64::P0) 2237 .Case("p1", AArch64::P1) 2238 .Case("p2", AArch64::P2) 2239 .Case("p3", AArch64::P3) 2240 .Case("p4", AArch64::P4) 2241 .Case("p5", AArch64::P5) 2242 .Case("p6", AArch64::P6) 2243 .Case("p7", AArch64::P7) 2244 .Case("p8", AArch64::P8) 2245 .Case("p9", AArch64::P9) 2246 .Case("p10", AArch64::P10) 2247 .Case("p11", AArch64::P11) 2248 .Case("p12", AArch64::P12) 2249 .Case("p13", AArch64::P13) 2250 .Case("p14", AArch64::P14) 2251 .Case("p15", AArch64::P15) 2252 .Default(0); 2253 } 2254 2255 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, 2256 SMLoc &EndLoc) { 2257 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success; 2258 } 2259 2260 OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo, 2261 SMLoc &StartLoc, 2262 SMLoc &EndLoc) { 2263 StartLoc = getLoc(); 2264 auto Res = tryParseScalarRegister(RegNo); 2265 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1); 2266 return Res; 2267 } 2268 2269 // Matches a register name or register alias previously defined by '.req' 2270 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name, 2271 RegKind Kind) { 2272 unsigned RegNum = 0; 2273 if ((RegNum = matchSVEDataVectorRegName(Name))) 2274 return Kind == RegKind::SVEDataVector ? RegNum : 0; 2275 2276 if ((RegNum = matchSVEPredicateVectorRegName(Name))) 2277 return Kind == RegKind::SVEPredicateVector ? 
RegNum : 0; 2278 2279 if ((RegNum = MatchNeonVectorRegName(Name))) 2280 return Kind == RegKind::NeonVector ? RegNum : 0; 2281 2282 // The parsed register must be of RegKind Scalar 2283 if ((RegNum = MatchRegisterName(Name))) 2284 return Kind == RegKind::Scalar ? RegNum : 0; 2285 2286 if (!RegNum) { 2287 // Handle a few common aliases of registers. 2288 if (auto RegNum = StringSwitch<unsigned>(Name.lower()) 2289 .Case("fp", AArch64::FP) 2290 .Case("lr", AArch64::LR) 2291 .Case("x31", AArch64::XZR) 2292 .Case("w31", AArch64::WZR) 2293 .Default(0)) 2294 return Kind == RegKind::Scalar ? RegNum : 0; 2295 2296 // Check for aliases registered via .req. Canonicalize to lower case. 2297 // That's more consistent since register names are case insensitive, and 2298 // it's how the original entry was passed in from MC/MCParser/AsmParser. 2299 auto Entry = RegisterReqs.find(Name.lower()); 2300 if (Entry == RegisterReqs.end()) 2301 return 0; 2302 2303 // set RegNum if the match is the right kind of register 2304 if (Kind == Entry->getValue().first) 2305 RegNum = Entry->getValue().second; 2306 } 2307 return RegNum; 2308 } 2309 2310 /// tryParseScalarRegister - Try to parse a register name. The token must be an 2311 /// Identifier when called, and if it is a register name the token is eaten and 2312 /// the register is added to the operand list. 2313 OperandMatchResultTy 2314 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) { 2315 MCAsmParser &Parser = getParser(); 2316 const AsmToken &Tok = Parser.getTok(); 2317 if (Tok.isNot(AsmToken::Identifier)) 2318 return MatchOperand_NoMatch; 2319 2320 std::string lowerCase = Tok.getString().lower(); 2321 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar); 2322 if (Reg == 0) 2323 return MatchOperand_NoMatch; 2324 2325 RegNum = Reg; 2326 Parser.Lex(); // Eat identifier token. 2327 return MatchOperand_Success; 2328 } 2329 2330 /// tryParseSysCROperand - Try to parse a system instruction CR operand name. 2331 OperandMatchResultTy 2332 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) { 2333 MCAsmParser &Parser = getParser(); 2334 SMLoc S = getLoc(); 2335 2336 if (Parser.getTok().isNot(AsmToken::Identifier)) { 2337 Error(S, "Expected cN operand where 0 <= N <= 15"); 2338 return MatchOperand_ParseFail; 2339 } 2340 2341 StringRef Tok = Parser.getTok().getIdentifier(); 2342 if (Tok[0] != 'c' && Tok[0] != 'C') { 2343 Error(S, "Expected cN operand where 0 <= N <= 15"); 2344 return MatchOperand_ParseFail; 2345 } 2346 2347 uint32_t CRNum; 2348 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum); 2349 if (BadNum || CRNum > 15) { 2350 Error(S, "Expected cN operand where 0 <= N <= 15"); 2351 return MatchOperand_ParseFail; 2352 } 2353 2354 Parser.Lex(); // Eat identifier token. 2355 Operands.push_back( 2356 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext())); 2357 return MatchOperand_Success; 2358 } 2359 2360 /// tryParsePrefetch - Try to parse a prefetch operand. 
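/// A prefetch operand is either a named hint or a raw immediate; illustratively,
/// "prfm pldl1keep, [x0]" and "prfm #0, [x0]" encode the same 5-bit prfop value.
/// SVE prefetch hints are looked up separately and only span a 0-15 range.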
2361 template <bool IsSVEPrefetch> 2362 OperandMatchResultTy 2363 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) { 2364 MCAsmParser &Parser = getParser(); 2365 SMLoc S = getLoc(); 2366 const AsmToken &Tok = Parser.getTok(); 2367 2368 auto LookupByName = [](StringRef N) { 2369 if (IsSVEPrefetch) { 2370 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N)) 2371 return Optional<unsigned>(Res->Encoding); 2372 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N)) 2373 return Optional<unsigned>(Res->Encoding); 2374 return Optional<unsigned>(); 2375 }; 2376 2377 auto LookupByEncoding = [](unsigned E) { 2378 if (IsSVEPrefetch) { 2379 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E)) 2380 return Optional<StringRef>(Res->Name); 2381 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E)) 2382 return Optional<StringRef>(Res->Name); 2383 return Optional<StringRef>(); 2384 }; 2385 unsigned MaxVal = IsSVEPrefetch ? 15 : 31; 2386 2387 // Either an identifier for named values or a 5-bit immediate. 2388 // Eat optional hash. 2389 if (parseOptionalToken(AsmToken::Hash) || 2390 Tok.is(AsmToken::Integer)) { 2391 const MCExpr *ImmVal; 2392 if (getParser().parseExpression(ImmVal)) 2393 return MatchOperand_ParseFail; 2394 2395 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2396 if (!MCE) { 2397 TokError("immediate value expected for prefetch operand"); 2398 return MatchOperand_ParseFail; 2399 } 2400 unsigned prfop = MCE->getValue(); 2401 if (prfop > MaxVal) { 2402 TokError("prefetch operand out of range, [0," + utostr(MaxVal) + 2403 "] expected"); 2404 return MatchOperand_ParseFail; 2405 } 2406 2407 auto PRFM = LookupByEncoding(MCE->getValue()); 2408 Operands.push_back(AArch64Operand::CreatePrefetch( 2409 prfop, PRFM.getValueOr(""), S, getContext())); 2410 return MatchOperand_Success; 2411 } 2412 2413 if (Tok.isNot(AsmToken::Identifier)) { 2414 TokError("prefetch hint expected"); 2415 return MatchOperand_ParseFail; 2416 } 2417 2418 auto PRFM = LookupByName(Tok.getString()); 2419 if (!PRFM) { 2420 TokError("prefetch hint expected"); 2421 return MatchOperand_ParseFail; 2422 } 2423 2424 Operands.push_back(AArch64Operand::CreatePrefetch( 2425 *PRFM, Tok.getString(), S, getContext())); 2426 Parser.Lex(); // Eat identifier token. 2427 return MatchOperand_Success; 2428 } 2429 2430 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command 2431 OperandMatchResultTy 2432 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) { 2433 MCAsmParser &Parser = getParser(); 2434 SMLoc S = getLoc(); 2435 const AsmToken &Tok = Parser.getTok(); 2436 if (Tok.isNot(AsmToken::Identifier)) { 2437 TokError("invalid operand for instruction"); 2438 return MatchOperand_ParseFail; 2439 } 2440 2441 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString()); 2442 if (!PSB) { 2443 TokError("invalid operand for instruction"); 2444 return MatchOperand_ParseFail; 2445 } 2446 2447 Operands.push_back(AArch64Operand::CreatePSBHint( 2448 PSB->Encoding, Tok.getString(), S, getContext())); 2449 Parser.Lex(); // Eat identifier token. 
2450 return MatchOperand_Success; 2451 } 2452 2453 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command 2454 OperandMatchResultTy 2455 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) { 2456 MCAsmParser &Parser = getParser(); 2457 SMLoc S = getLoc(); 2458 const AsmToken &Tok = Parser.getTok(); 2459 if (Tok.isNot(AsmToken::Identifier)) { 2460 TokError("invalid operand for instruction"); 2461 return MatchOperand_ParseFail; 2462 } 2463 2464 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString()); 2465 if (!BTI) { 2466 TokError("invalid operand for instruction"); 2467 return MatchOperand_ParseFail; 2468 } 2469 2470 Operands.push_back(AArch64Operand::CreateBTIHint( 2471 BTI->Encoding, Tok.getString(), S, getContext())); 2472 Parser.Lex(); // Eat identifier token. 2473 return MatchOperand_Success; 2474 } 2475 2476 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP 2477 /// instruction. 2478 OperandMatchResultTy 2479 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) { 2480 MCAsmParser &Parser = getParser(); 2481 SMLoc S = getLoc(); 2482 const MCExpr *Expr = nullptr; 2483 2484 if (Parser.getTok().is(AsmToken::Hash)) { 2485 Parser.Lex(); // Eat hash token. 2486 } 2487 2488 if (parseSymbolicImmVal(Expr)) 2489 return MatchOperand_ParseFail; 2490 2491 AArch64MCExpr::VariantKind ELFRefKind; 2492 MCSymbolRefExpr::VariantKind DarwinRefKind; 2493 int64_t Addend; 2494 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { 2495 if (DarwinRefKind == MCSymbolRefExpr::VK_None && 2496 ELFRefKind == AArch64MCExpr::VK_INVALID) { 2497 // No modifier was specified at all; this is the syntax for an ELF basic 2498 // ADRP relocation (unfortunately). 2499 Expr = 2500 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext()); 2501 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE || 2502 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) && 2503 Addend != 0) { 2504 Error(S, "gotpage label reference not allowed an addend"); 2505 return MatchOperand_ParseFail; 2506 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE && 2507 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE && 2508 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE && 2509 ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC && 2510 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE && 2511 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE && 2512 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) { 2513 // The operand must be an @page or @gotpage qualified symbolref. 2514 Error(S, "page or gotpage label reference expected"); 2515 return MatchOperand_ParseFail; 2516 } 2517 } 2518 2519 // We have either a label reference possibly with addend or an immediate. The 2520 // addend is a raw value here. The linker will adjust it to only reference the 2521 // page. 2522 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); 2523 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); 2524 2525 return MatchOperand_Success; 2526 } 2527 2528 /// tryParseAdrLabel - Parse and validate a source label for the ADR 2529 /// instruction. 2530 OperandMatchResultTy 2531 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) { 2532 SMLoc S = getLoc(); 2533 const MCExpr *Expr = nullptr; 2534 2535 // Leave anything with a bracket to the default for SVE 2536 if (getParser().getTok().is(AsmToken::LBrac)) 2537 return MatchOperand_NoMatch; 2538 2539 if (getParser().getTok().is(AsmToken::Hash)) 2540 getParser().Lex(); // Eat hash token. 
2541 2542 if (parseSymbolicImmVal(Expr)) 2543 return MatchOperand_ParseFail; 2544 2545 AArch64MCExpr::VariantKind ELFRefKind; 2546 MCSymbolRefExpr::VariantKind DarwinRefKind; 2547 int64_t Addend; 2548 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) { 2549 if (DarwinRefKind == MCSymbolRefExpr::VK_None && 2550 ELFRefKind == AArch64MCExpr::VK_INVALID) { 2551 // No modifier was specified at all; this is the syntax for an ELF basic 2552 // ADR relocation (unfortunately). 2553 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext()); 2554 } else { 2555 Error(S, "unexpected adr label"); 2556 return MatchOperand_ParseFail; 2557 } 2558 } 2559 2560 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); 2561 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext())); 2562 return MatchOperand_Success; 2563 } 2564 2565 /// tryParseFPImm - A floating point immediate expression operand. 2566 template<bool AddFPZeroAsLiteral> 2567 OperandMatchResultTy 2568 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) { 2569 MCAsmParser &Parser = getParser(); 2570 SMLoc S = getLoc(); 2571 2572 bool Hash = parseOptionalToken(AsmToken::Hash); 2573 2574 // Handle negation, as that still comes through as a separate token. 2575 bool isNegative = parseOptionalToken(AsmToken::Minus); 2576 2577 const AsmToken &Tok = Parser.getTok(); 2578 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) { 2579 if (!Hash) 2580 return MatchOperand_NoMatch; 2581 TokError("invalid floating point immediate"); 2582 return MatchOperand_ParseFail; 2583 } 2584 2585 // Parse hexadecimal representation. 2586 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) { 2587 if (Tok.getIntVal() > 255 || isNegative) { 2588 TokError("encoded floating point value out of range"); 2589 return MatchOperand_ParseFail; 2590 } 2591 2592 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal())); 2593 Operands.push_back( 2594 AArch64Operand::CreateFPImm(F, true, S, getContext())); 2595 } else { 2596 // Parse FP representation. 2597 APFloat RealVal(APFloat::IEEEdouble()); 2598 auto StatusOrErr = 2599 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero); 2600 if (errorToBool(StatusOrErr.takeError())) { 2601 TokError("invalid floating point representation"); 2602 return MatchOperand_ParseFail; 2603 } 2604 2605 if (isNegative) 2606 RealVal.changeSign(); 2607 2608 if (AddFPZeroAsLiteral && RealVal.isPosZero()) { 2609 Operands.push_back( 2610 AArch64Operand::CreateToken("#0", false, S, getContext())); 2611 Operands.push_back( 2612 AArch64Operand::CreateToken(".0", false, S, getContext())); 2613 } else 2614 Operands.push_back(AArch64Operand::CreateFPImm( 2615 RealVal, *StatusOrErr == APFloat::opOK, S, getContext())); 2616 } 2617 2618 Parser.Lex(); // Eat the token. 2619 2620 return MatchOperand_Success; 2621 } 2622 2623 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with 2624 /// a shift suffix, for example '#1, lsl #12'. 2625 OperandMatchResultTy 2626 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) { 2627 MCAsmParser &Parser = getParser(); 2628 SMLoc S = getLoc(); 2629 2630 if (Parser.getTok().is(AsmToken::Hash)) 2631 Parser.Lex(); // Eat '#' 2632 else if (Parser.getTok().isNot(AsmToken::Integer)) 2633 // Operand should start with '#' or be an integer; otherwise there is no match.
2634 return MatchOperand_NoMatch; 2635 2636 const MCExpr *Imm = nullptr; 2637 if (parseSymbolicImmVal(Imm)) 2638 return MatchOperand_ParseFail; 2639 else if (Parser.getTok().isNot(AsmToken::Comma)) { 2640 SMLoc E = Parser.getTok().getLoc(); 2641 Operands.push_back( 2642 AArch64Operand::CreateImm(Imm, S, E, getContext())); 2643 return MatchOperand_Success; 2644 } 2645 2646 // Eat ',' 2647 Parser.Lex(); 2648 2649 // The optional operand must be "lsl #N" where N is non-negative. 2650 if (!Parser.getTok().is(AsmToken::Identifier) || 2651 !Parser.getTok().getIdentifier().equals_lower("lsl")) { 2652 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate"); 2653 return MatchOperand_ParseFail; 2654 } 2655 2656 // Eat 'lsl' 2657 Parser.Lex(); 2658 2659 parseOptionalToken(AsmToken::Hash); 2660 2661 if (Parser.getTok().isNot(AsmToken::Integer)) { 2662 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate"); 2663 return MatchOperand_ParseFail; 2664 } 2665 2666 int64_t ShiftAmount = Parser.getTok().getIntVal(); 2667 2668 if (ShiftAmount < 0) { 2669 Error(Parser.getTok().getLoc(), "positive shift amount required"); 2670 return MatchOperand_ParseFail; 2671 } 2672 Parser.Lex(); // Eat the number 2673 2674 // Just in case the optional lsl #0 is used for immediates other than zero. 2675 if (ShiftAmount == 0 && Imm != nullptr) { 2676 SMLoc E = Parser.getTok().getLoc(); 2677 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext())); 2678 return MatchOperand_Success; 2679 } 2680 2681 SMLoc E = Parser.getTok().getLoc(); 2682 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, 2683 S, E, getContext())); 2684 return MatchOperand_Success; 2685 } 2686 2687 /// parseCondCodeString - Parse a Condition Code string. 2688 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) { 2689 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) 2690 .Case("eq", AArch64CC::EQ) 2691 .Case("ne", AArch64CC::NE) 2692 .Case("cs", AArch64CC::HS) 2693 .Case("hs", AArch64CC::HS) 2694 .Case("cc", AArch64CC::LO) 2695 .Case("lo", AArch64CC::LO) 2696 .Case("mi", AArch64CC::MI) 2697 .Case("pl", AArch64CC::PL) 2698 .Case("vs", AArch64CC::VS) 2699 .Case("vc", AArch64CC::VC) 2700 .Case("hi", AArch64CC::HI) 2701 .Case("ls", AArch64CC::LS) 2702 .Case("ge", AArch64CC::GE) 2703 .Case("lt", AArch64CC::LT) 2704 .Case("gt", AArch64CC::GT) 2705 .Case("le", AArch64CC::LE) 2706 .Case("al", AArch64CC::AL) 2707 .Case("nv", AArch64CC::NV) 2708 .Default(AArch64CC::Invalid); 2709 2710 if (CC == AArch64CC::Invalid && 2711 getSTI().getFeatureBits()[AArch64::FeatureSVE]) 2712 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower()) 2713 .Case("none", AArch64CC::EQ) 2714 .Case("any", AArch64CC::NE) 2715 .Case("nlast", AArch64CC::HS) 2716 .Case("last", AArch64CC::LO) 2717 .Case("first", AArch64CC::MI) 2718 .Case("nfrst", AArch64CC::PL) 2719 .Case("pmore", AArch64CC::HI) 2720 .Case("plast", AArch64CC::LS) 2721 .Case("tcont", AArch64CC::GE) 2722 .Case("tstop", AArch64CC::LT) 2723 .Default(AArch64CC::Invalid); 2724 2725 return CC; 2726 } 2727 2728 /// parseCondCode - Parse a Condition Code operand. 
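/// The SVE aliases accepted above ("none", "any", ...) are pure renamings of
/// the base NZCV condition codes, so, illustratively, "b.none" assembles to
/// the same encoding as "b.eq".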
2729 bool AArch64AsmParser::parseCondCode(OperandVector &Operands, 2730 bool invertCondCode) { 2731 MCAsmParser &Parser = getParser(); 2732 SMLoc S = getLoc(); 2733 const AsmToken &Tok = Parser.getTok(); 2734 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2735 2736 StringRef Cond = Tok.getString(); 2737 AArch64CC::CondCode CC = parseCondCodeString(Cond); 2738 if (CC == AArch64CC::Invalid) 2739 return TokError("invalid condition code"); 2740 Parser.Lex(); // Eat identifier token. 2741 2742 if (invertCondCode) { 2743 if (CC == AArch64CC::AL || CC == AArch64CC::NV) 2744 return TokError("condition codes AL and NV are invalid for this instruction"); 2745 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC)); 2746 } 2747 2748 Operands.push_back( 2749 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext())); 2750 return false; 2751 } 2752 2753 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend 2754 /// argument. Parse it if present. 2755 OperandMatchResultTy 2756 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) { 2757 MCAsmParser &Parser = getParser(); 2758 const AsmToken &Tok = Parser.getTok(); 2759 std::string LowerID = Tok.getString().lower(); 2760 AArch64_AM::ShiftExtendType ShOp = 2761 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID) 2762 .Case("lsl", AArch64_AM::LSL) 2763 .Case("lsr", AArch64_AM::LSR) 2764 .Case("asr", AArch64_AM::ASR) 2765 .Case("ror", AArch64_AM::ROR) 2766 .Case("msl", AArch64_AM::MSL) 2767 .Case("uxtb", AArch64_AM::UXTB) 2768 .Case("uxth", AArch64_AM::UXTH) 2769 .Case("uxtw", AArch64_AM::UXTW) 2770 .Case("uxtx", AArch64_AM::UXTX) 2771 .Case("sxtb", AArch64_AM::SXTB) 2772 .Case("sxth", AArch64_AM::SXTH) 2773 .Case("sxtw", AArch64_AM::SXTW) 2774 .Case("sxtx", AArch64_AM::SXTX) 2775 .Default(AArch64_AM::InvalidShiftExtend); 2776 2777 if (ShOp == AArch64_AM::InvalidShiftExtend) 2778 return MatchOperand_NoMatch; 2779 2780 SMLoc S = Tok.getLoc(); 2781 Parser.Lex(); 2782 2783 bool Hash = parseOptionalToken(AsmToken::Hash); 2784 2785 if (!Hash && getLexer().isNot(AsmToken::Integer)) { 2786 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR || 2787 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR || 2788 ShOp == AArch64_AM::MSL) { 2789 // We expect a number here. 2790 TokError("expected #imm after shift specifier"); 2791 return MatchOperand_ParseFail; 2792 } 2793 2794 // "extend" type operations don't need an immediate, #0 is implicit. 2795 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1); 2796 Operands.push_back( 2797 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext())); 2798 return MatchOperand_Success; 2799 } 2800 2801 // Make sure we do actually have a number, identifier or a parenthesized 2802 // expression.
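// (Illustrative: "lsl #(1 << 2)" reaches this point as a parenthesized
// expression; the generic expression parser folds it to the constant 4 before
// the MCConstantExpr check below.)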
2803 SMLoc E = Parser.getTok().getLoc(); 2804 if (!Parser.getTok().is(AsmToken::Integer) && 2805 !Parser.getTok().is(AsmToken::LParen) && 2806 !Parser.getTok().is(AsmToken::Identifier)) { 2807 Error(E, "expected integer shift amount"); 2808 return MatchOperand_ParseFail; 2809 } 2810 2811 const MCExpr *ImmVal; 2812 if (getParser().parseExpression(ImmVal)) 2813 return MatchOperand_ParseFail; 2814 2815 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2816 if (!MCE) { 2817 Error(E, "expected constant '#imm' after shift specifier"); 2818 return MatchOperand_ParseFail; 2819 } 2820 2821 E = SMLoc::getFromPointer(getLoc().getPointer() - 1); 2822 Operands.push_back(AArch64Operand::CreateShiftExtend( 2823 ShOp, MCE->getValue(), true, S, E, getContext())); 2824 return MatchOperand_Success; 2825 } 2826 2827 static const struct Extension { 2828 const char *Name; 2829 const FeatureBitset Features; 2830 } ExtensionMap[] = { 2831 {"crc", {AArch64::FeatureCRC}}, 2832 {"sm4", {AArch64::FeatureSM4}}, 2833 {"sha3", {AArch64::FeatureSHA3}}, 2834 {"sha2", {AArch64::FeatureSHA2}}, 2835 {"aes", {AArch64::FeatureAES}}, 2836 {"crypto", {AArch64::FeatureCrypto}}, 2837 {"fp", {AArch64::FeatureFPARMv8}}, 2838 {"simd", {AArch64::FeatureNEON}}, 2839 {"ras", {AArch64::FeatureRAS}}, 2840 {"lse", {AArch64::FeatureLSE}}, 2841 {"predres", {AArch64::FeaturePredRes}}, 2842 {"ccdp", {AArch64::FeatureCacheDeepPersist}}, 2843 {"mte", {AArch64::FeatureMTE}}, 2844 {"tlb-rmi", {AArch64::FeatureTLB_RMI}}, 2845 {"pan-rwv", {AArch64::FeaturePAN_RWV}}, 2846 {"ccpp", {AArch64::FeatureCCPP}}, 2847 {"rcpc", {AArch64::FeatureRCPC}}, 2848 {"sve", {AArch64::FeatureSVE}}, 2849 {"sve2", {AArch64::FeatureSVE2}}, 2850 {"sve2-aes", {AArch64::FeatureSVE2AES}}, 2851 {"sve2-sm4", {AArch64::FeatureSVE2SM4}}, 2852 {"sve2-sha3", {AArch64::FeatureSVE2SHA3}}, 2853 {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}}, 2854 // FIXME: Unsupported extensions 2855 {"pan", {}}, 2856 {"lor", {}}, 2857 {"rdma", {}}, 2858 {"profile", {}}, 2859 }; 2860 2861 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) { 2862 if (FBS[AArch64::HasV8_1aOps]) 2863 Str += "ARMv8.1a"; 2864 else if (FBS[AArch64::HasV8_2aOps]) 2865 Str += "ARMv8.2a"; 2866 else if (FBS[AArch64::HasV8_3aOps]) 2867 Str += "ARMv8.3a"; 2868 else if (FBS[AArch64::HasV8_4aOps]) 2869 Str += "ARMv8.4a"; 2870 else if (FBS[AArch64::HasV8_5aOps]) 2871 Str += "ARMv8.5a"; 2872 else if (FBS[AArch64::HasV8_6aOps]) 2873 Str += "ARMv8.6a"; 2874 else { 2875 auto ext = std::find_if(std::begin(ExtensionMap), 2876 std::end(ExtensionMap), 2877 [&](const Extension& e) 2878 // Use & in case multiple features are enabled 2879 { return (FBS & e.Features) != FeatureBitset(); } 2880 ); 2881 2882 Str += ext != std::end(ExtensionMap) ? 
ext->Name : "(unknown)"; 2883 } 2884 } 2885 2886 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands, 2887 SMLoc S) { 2888 const uint16_t Op2 = Encoding & 7; 2889 const uint16_t Cm = (Encoding & 0x78) >> 3; 2890 const uint16_t Cn = (Encoding & 0x780) >> 7; 2891 const uint16_t Op1 = (Encoding & 0x3800) >> 11; 2892 2893 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext()); 2894 2895 Operands.push_back( 2896 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); 2897 Operands.push_back( 2898 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); 2899 Operands.push_back( 2900 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); 2901 Expr = MCConstantExpr::create(Op2, getContext()); 2902 Operands.push_back( 2903 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); 2904 } 2905 2906 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for 2907 /// the SYS instruction. Parse them specially so that we create a SYS MCInst. 2908 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc, 2909 OperandVector &Operands) { 2910 if (Name.find('.') != StringRef::npos) 2911 return TokError("invalid operand"); 2912 2913 Mnemonic = Name; 2914 Operands.push_back( 2915 AArch64Operand::CreateToken("sys", false, NameLoc, getContext())); 2916 2917 MCAsmParser &Parser = getParser(); 2918 const AsmToken &Tok = Parser.getTok(); 2919 StringRef Op = Tok.getString(); 2920 SMLoc S = Tok.getLoc(); 2921 2922 if (Mnemonic == "ic") { 2923 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op); 2924 if (!IC) 2925 return TokError("invalid operand for IC instruction"); 2926 else if (!IC->haveFeatures(getSTI().getFeatureBits())) { 2927 std::string Str("IC " + std::string(IC->Name) + " requires "); 2928 setRequiredFeatureString(IC->getRequiredFeatures(), Str); 2929 return TokError(Str.c_str()); 2930 } 2931 createSysAlias(IC->Encoding, Operands, S); 2932 } else if (Mnemonic == "dc") { 2933 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op); 2934 if (!DC) 2935 return TokError("invalid operand for DC instruction"); 2936 else if (!DC->haveFeatures(getSTI().getFeatureBits())) { 2937 std::string Str("DC " + std::string(DC->Name) + " requires "); 2938 setRequiredFeatureString(DC->getRequiredFeatures(), Str); 2939 return TokError(Str.c_str()); 2940 } 2941 createSysAlias(DC->Encoding, Operands, S); 2942 } else if (Mnemonic == "at") { 2943 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op); 2944 if (!AT) 2945 return TokError("invalid operand for AT instruction"); 2946 else if (!AT->haveFeatures(getSTI().getFeatureBits())) { 2947 std::string Str("AT " + std::string(AT->Name) + " requires "); 2948 setRequiredFeatureString(AT->getRequiredFeatures(), Str); 2949 return TokError(Str.c_str()); 2950 } 2951 createSysAlias(AT->Encoding, Operands, S); 2952 } else if (Mnemonic == "tlbi") { 2953 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op); 2954 if (!TLBI) 2955 return TokError("invalid operand for TLBI instruction"); 2956 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) { 2957 std::string Str("TLBI " + std::string(TLBI->Name) + " requires "); 2958 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str); 2959 return TokError(Str.c_str()); 2960 } 2961 createSysAlias(TLBI->Encoding, Operands, S); 2962 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") { 2963 const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op); 2964 if (!PRCTX) 2965 return TokError("invalid operand for 
prediction restriction instruction"); 2966 else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) { 2967 std::string Str( 2968 Mnemonic.upper() + std::string(PRCTX->Name) + " requires "); 2969 setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str); 2970 return TokError(Str.c_str()); 2971 } 2972 uint16_t PRCTX_Op2 = 2973 Mnemonic == "cfp" ? 4 : 2974 Mnemonic == "dvp" ? 5 : 2975 Mnemonic == "cpp" ? 7 : 2976 0; 2977 assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction"); 2978 createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S); 2979 } 2980 2981 Parser.Lex(); // Eat operand. 2982 2983 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos); 2984 bool HasRegister = false; 2985 2986 // Check for the optional register operand. 2987 if (parseOptionalToken(AsmToken::Comma)) { 2988 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands)) 2989 return TokError("expected register operand"); 2990 HasRegister = true; 2991 } 2992 2993 if (ExpectRegister && !HasRegister) 2994 return TokError("specified " + Mnemonic + " op requires a register"); 2995 else if (!ExpectRegister && HasRegister) 2996 return TokError("specified " + Mnemonic + " op does not use a register"); 2997 2998 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list")) 2999 return true; 3000 3001 return false; 3002 } 3003 3004 OperandMatchResultTy 3005 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) { 3006 MCAsmParser &Parser = getParser(); 3007 const AsmToken &Tok = Parser.getTok(); 3008 3009 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) { 3010 TokError("'csync' operand expected"); 3011 return MatchOperand_ParseFail; 3012 // Can be either a #imm style literal or an option name 3013 } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) { 3014 // Immediate operand. 3015 const MCExpr *ImmVal; 3016 SMLoc ExprLoc = getLoc(); 3017 if (getParser().parseExpression(ImmVal)) 3018 return MatchOperand_ParseFail; 3019 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 3020 if (!MCE) { 3021 Error(ExprLoc, "immediate value expected for barrier operand"); 3022 return MatchOperand_ParseFail; 3023 } 3024 if (MCE->getValue() < 0 || MCE->getValue() > 15) { 3025 Error(ExprLoc, "barrier operand out of range"); 3026 return MatchOperand_ParseFail; 3027 } 3028 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue()); 3029 Operands.push_back(AArch64Operand::CreateBarrier( 3030 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext())); 3031 return MatchOperand_Success; 3032 } 3033 3034 if (Tok.isNot(AsmToken::Identifier)) { 3035 TokError("invalid operand for instruction"); 3036 return MatchOperand_ParseFail; 3037 } 3038 3039 auto TSB = AArch64TSB::lookupTSBByName(Tok.getString()); 3040 // The only valid named option for ISB is 'sy' 3041 auto DB = AArch64DB::lookupDBByName(Tok.getString()); 3042 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) { 3043 TokError("'sy' or #imm operand expected"); 3044 return MatchOperand_ParseFail; 3045 // The only valid named option for TSB is 'csync' 3046 } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) { 3047 TokError("'csync' operand expected"); 3048 return MatchOperand_ParseFail; 3049 } else if (!DB && !TSB) { 3050 TokError("invalid barrier option name"); 3051 return MatchOperand_ParseFail; 3052 } 3053 3054 Operands.push_back(AArch64Operand::CreateBarrier( 3055 DB ? 
DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext())); 3056 Parser.Lex(); // Consume the option 3057 3058 return MatchOperand_Success; 3059 } 3060 3061 OperandMatchResultTy 3062 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) { 3063 MCAsmParser &Parser = getParser(); 3064 const AsmToken &Tok = Parser.getTok(); 3065 3066 if (Tok.isNot(AsmToken::Identifier)) 3067 return MatchOperand_NoMatch; 3068 3069 int MRSReg, MSRReg; 3070 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString()); 3071 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) { 3072 MRSReg = SysReg->Readable ? SysReg->Encoding : -1; 3073 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1; 3074 } else 3075 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString()); 3076 3077 auto PState = AArch64PState::lookupPStateByName(Tok.getString()); 3078 unsigned PStateImm = -1; 3079 if (PState && PState->haveFeatures(getSTI().getFeatureBits())) 3080 PStateImm = PState->Encoding; 3081 3082 Operands.push_back( 3083 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg, 3084 PStateImm, getContext())); 3085 Parser.Lex(); // Eat identifier 3086 3087 return MatchOperand_Success; 3088 } 3089 3090 /// tryParseNeonVectorRegister - Parse a vector register operand. 3091 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) { 3092 MCAsmParser &Parser = getParser(); 3093 if (Parser.getTok().isNot(AsmToken::Identifier)) 3094 return true; 3095 3096 SMLoc S = getLoc(); 3097 // Check for a vector register specifier first. 3098 StringRef Kind; 3099 unsigned Reg; 3100 OperandMatchResultTy Res = 3101 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector); 3102 if (Res != MatchOperand_Success) 3103 return true; 3104 3105 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector); 3106 if (!KindRes) 3107 return true; 3108 3109 unsigned ElementWidth = KindRes->second; 3110 Operands.push_back( 3111 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth, 3112 S, getLoc(), getContext())); 3113 3114 // If there was an explicit qualifier, that goes on as a literal text 3115 // operand. 3116 if (!Kind.empty()) 3117 Operands.push_back( 3118 AArch64Operand::CreateToken(Kind, false, S, getContext())); 3119 3120 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail; 3121 } 3122 3123 OperandMatchResultTy 3124 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) { 3125 SMLoc SIdx = getLoc(); 3126 if (parseOptionalToken(AsmToken::LBrac)) { 3127 const MCExpr *ImmVal; 3128 if (getParser().parseExpression(ImmVal)) 3129 return MatchOperand_NoMatch; 3130 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 3131 if (!MCE) { 3132 TokError("immediate value expected for vector index"); 3133 return MatchOperand_ParseFail; 3134 } 3135 3136 SMLoc E = getLoc(); 3137 3138 if (parseToken(AsmToken::RBrac, "']' expected")) 3139 return MatchOperand_ParseFail; 3140 3141 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx, 3142 E, getContext())); 3143 return MatchOperand_Success; 3144 } 3145 3146 return MatchOperand_NoMatch; 3147 } 3148 3149 // tryParseVectorRegister - Try to parse a vector register name with 3150 // optional kind specifier. If it is a register specifier, eat the token 3151 // and return it.
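// For example (illustrative): "v0.8b" sets Reg to the Q0 register and Kind to
// ".8b", while a bare SVE name such as "z3" leaves Kind empty.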
3152 OperandMatchResultTy 3153 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind, 3154 RegKind MatchKind) { 3155 MCAsmParser &Parser = getParser(); 3156 const AsmToken &Tok = Parser.getTok(); 3157 3158 if (Tok.isNot(AsmToken::Identifier)) 3159 return MatchOperand_NoMatch; 3160 3161 StringRef Name = Tok.getString(); 3162 // If there is a kind specifier, it's separated from the register name by 3163 // a '.'. 3164 size_t Start = 0, Next = Name.find('.'); 3165 StringRef Head = Name.slice(Start, Next); 3166 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind); 3167 3168 if (RegNum) { 3169 if (Next != StringRef::npos) { 3170 Kind = Name.slice(Next, StringRef::npos); 3171 if (!isValidVectorKind(Kind, MatchKind)) { 3172 TokError("invalid vector kind qualifier"); 3173 return MatchOperand_ParseFail; 3174 } 3175 } 3176 Parser.Lex(); // Eat the register token. 3177 3178 Reg = RegNum; 3179 return MatchOperand_Success; 3180 } 3181 3182 return MatchOperand_NoMatch; 3183 } 3184 3185 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand. 3186 OperandMatchResultTy 3187 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) { 3188 // Check for a SVE predicate register specifier first. 3189 const SMLoc S = getLoc(); 3190 StringRef Kind; 3191 unsigned RegNum; 3192 auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector); 3193 if (Res != MatchOperand_Success) 3194 return Res; 3195 3196 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector); 3197 if (!KindRes) 3198 return MatchOperand_NoMatch; 3199 3200 unsigned ElementWidth = KindRes->second; 3201 Operands.push_back(AArch64Operand::CreateVectorReg( 3202 RegNum, RegKind::SVEPredicateVector, ElementWidth, S, 3203 getLoc(), getContext())); 3204 3205 // Not all predicates are followed by a '/m' or '/z'. 3206 MCAsmParser &Parser = getParser(); 3207 if (Parser.getTok().isNot(AsmToken::Slash)) 3208 return MatchOperand_Success; 3209 3210 // But when they do they shouldn't have an element type suffix. 3211 if (!Kind.empty()) { 3212 Error(S, "not expecting size suffix"); 3213 return MatchOperand_ParseFail; 3214 } 3215 3216 // Add a literal slash as operand 3217 Operands.push_back( 3218 AArch64Operand::CreateToken("/" , false, getLoc(), getContext())); 3219 3220 Parser.Lex(); // Eat the slash. 3221 3222 // Zeroing or merging? 3223 auto Pred = Parser.getTok().getString().lower(); 3224 if (Pred != "z" && Pred != "m") { 3225 Error(getLoc(), "expecting 'm' or 'z' predication"); 3226 return MatchOperand_ParseFail; 3227 } 3228 3229 // Add zero/merge token. 3230 const char *ZM = Pred == "z" ? "z" : "m"; 3231 Operands.push_back( 3232 AArch64Operand::CreateToken(ZM, false, getLoc(), getContext())); 3233 3234 Parser.Lex(); // Eat zero/merge token. 3235 return MatchOperand_Success; 3236 } 3237 3238 /// parseRegister - Parse a register operand. 3239 bool AArch64AsmParser::parseRegister(OperandVector &Operands) { 3240 // Try for a Neon vector register. 3241 if (!tryParseNeonVectorRegister(Operands)) 3242 return false; 3243 3244 // Otherwise try for a scalar register. 
3245 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success) 3246 return false; 3247 3248 return true; 3249 } 3250 3251 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) { 3252 MCAsmParser &Parser = getParser(); 3253 bool HasELFModifier = false; 3254 AArch64MCExpr::VariantKind RefKind; 3255 3256 if (parseOptionalToken(AsmToken::Colon)) { 3257 HasELFModifier = true; 3258 3259 if (Parser.getTok().isNot(AsmToken::Identifier)) 3260 return TokError("expect relocation specifier in operand after ':'"); 3261 3262 std::string LowerCase = Parser.getTok().getIdentifier().lower(); 3263 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase) 3264 .Case("lo12", AArch64MCExpr::VK_LO12) 3265 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3) 3266 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2) 3267 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S) 3268 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC) 3269 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1) 3270 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S) 3271 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC) 3272 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0) 3273 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S) 3274 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC) 3275 .Case("prel_g3", AArch64MCExpr::VK_PREL_G3) 3276 .Case("prel_g2", AArch64MCExpr::VK_PREL_G2) 3277 .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC) 3278 .Case("prel_g1", AArch64MCExpr::VK_PREL_G1) 3279 .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC) 3280 .Case("prel_g0", AArch64MCExpr::VK_PREL_G0) 3281 .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC) 3282 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2) 3283 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1) 3284 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC) 3285 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0) 3286 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC) 3287 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12) 3288 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12) 3289 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC) 3290 .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC) 3291 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2) 3292 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1) 3293 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC) 3294 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0) 3295 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC) 3296 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12) 3297 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12) 3298 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC) 3299 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12) 3300 .Case("got", AArch64MCExpr::VK_GOT_PAGE) 3301 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12) 3302 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE) 3303 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC) 3304 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1) 3305 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC) 3306 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE) 3307 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12) 3308 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12) 3309 .Default(AArch64MCExpr::VK_INVALID); 3310 3311 if (RefKind == AArch64MCExpr::VK_INVALID) 3312 return TokError("expect relocation specifier in operand after ':'"); 3313 3314 Parser.Lex(); // Eat identifier 3315 3316 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier")) 3317 return true; 3318 } 3319 3320 if (getParser().parseExpression(ImmVal)) 3321 return true; 3322 3323 if (HasELFModifier) 3324 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, 
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around the parse function.
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // All kind suffixes must match across the registers in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
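  // e.g. (illustrative): "{ v0.16b - v3.16b }" takes the range branch above,
  // while "{ v0.16b, v1.16b }" takes the comma-separated loop below.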
  else {
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // All kind suffixes must match across the registers in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31).
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}

/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
  if (ParseRes != MatchOperand_Success)
    return true;

  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}

OperandMatchResultTy
AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  parseOptionalToken(AsmToken::Hash);

  if (getParser().getTok().isNot(AsmToken::Integer)) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
    Error(getLoc(), "index must be absent or #0");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return MatchOperand_Success;
}
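
// The operand form handled below is a scalar register with an optional
// shift/extend after it, e.g. (illustrative): "x2", "x2, lsl #3" or
// "w8, sxtw".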
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
OperandMatchResultTy
AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  unsigned RegNum;
  OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
  if (Res != MatchOperand_Success)
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return MatchOperand_Success;
  }

  // Eat the comma.
  getParser().Lex();

  // Match the shift.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}

bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
  if (!Parser.getTok().getString().equals_lower("mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
  Parser.Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
    Parser.Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Parser.Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
            getContext()));
        // Note: this function returns bool (false == success), unlike the
        // tryParse* helpers above.
        return false;
      }
  }

  return Error(getLoc(), "expected 'vl' or '#<imm>'");
}
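
// Illustrative SVE operands using the "mul" decoration parsed above:
//   ldr  z0, [x0, #1, mul vl]
//   cntd x0, all, mul #4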

/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // Check if the current operand has a custom associated parser; if so, try to
  // custom parse the operand, or fall back to the general approach below.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseNeonVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
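  // The cases below handle immediates, e.g. "#42"; the only floating-point
  // literal accepted is "#0.0", as in "fcmp s0, #0.0" (illustrative).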
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign.
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it.
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the operand is an immediate that fits into a mov, replace the ldr
    // with a movz.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(
              AArch64_AM::LSL, ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // Check if the immediate is an unsigned or signed 32-bit int for W regs.
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
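
// Illustrative expansions of the "ldr <reg>, =<value>" pseudo handled above:
//   ldr x0, =0x10000    -> movz x0, #1, lsl #16
//   ldr x0, =some_label -> ldr x0, <constant pool entry>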

bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
                                 const MCParsedAsmOperand &Op2) const {
  auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
  auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
    return MCTargetAsmParser::regsEqual(Op1, Op2);

  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
         "Testing equality of non-scalar registers not supported");

  // Check if the registers match through their sub/super register classes.
  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
    return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
  if (AOp1.getRegEqualityTy() == EqualsSubReg)
    return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
    return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
  if (AOp2.getRegEqualityTy() == EqualsSubReg)
    return getWRegFromXReg(Op2.getReg()) == Op1.getReg();

  return false;
}

/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic.
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }
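  // e.g. (illustrative): "fmla.4s" was split above into the mnemonic token
  // "fmla" plus the suffix token ".4s".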

  // Conditional compare instructions have a Condition Code operand, which
  // needs to be parsed and turned into an immediate operand.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both are
      // due to memory specifiers:
      // + An RBrac will end an address for load/store/prefetch
      // + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      SMLoc RLoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", false, RLoc, getContext()));
      SMLoc ELoc = Parser.getTok().getLoc();
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", false, ELoc, getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}

static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
}
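
// e.g. isMatchingOrAlias(AArch64::Z3, AArch64::D3) is true: Z3 aliases the
// B3/H3/S3/D3/Q3 views of the same SVE register.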

// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it. Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erroneously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed instructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                          " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                           " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                             " movprfx and destination also used as"
                             " non-destructive source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instruction's general predicate.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx, suggest using unpredicated"
                            " movprfx");

      // Instruction must use the same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx using a different general"
                            " predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                            " predicated movprfx with a different element"
                            " size");
    }
  }
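
  // An illustrative sequence that satisfies the movprfx rules above:
  //   movprfx z0, z7
  //   add     z0.d, p0/m, z0.d, z1.d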

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    LLVM_FALLTHROUGH;
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
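  // e.g. rejected above (illustrative): "ldp x0, x0, [x1]" (Rt2 == Rt) and
  // "ldr x0, [x0], #8" (writeback base also appears as Rt).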
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
                   "unpredictable LDRA instruction, writeback base"
                   " is also a destination");
    break;
  }
  }
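
  // e.g. "stxr w0, w0, [x1]" is rejected above because the status register is
  // also used as the store source (illustrative).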

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri.
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise.
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, borked').
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here.
    }
    return false;
  }
  default:
    return false;
  }
}

static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);

bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
                 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
                 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
                 "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
                 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
                 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(Loc, "invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPredicate3bBReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
  case Match_InvalidSVEPredicate3bHReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
  case Match_InvalidSVEPredicate3bSReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "invalid floating point constant, expected 0.0 or 1.0.");
  default:
    llvm_unreachable("unexpected error code!");
  }
}

static const char *getSubtargetFeatureName(uint64_t Val);
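
// The "lsl" alias handled at the top of MatchAndEmitInstruction below is
// rewritten in terms of UBFM, e.g. (illustrative): "lsl x0, x1, #4" becomes
// "ubfm x0, x1, #60, #59".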

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               OperandVector &Operands,
                                               MCStreamer &Out,
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpected empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");

  StringRef Tok = Op.getToken();
  unsigned NumOperands = Operands.size();

  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isScalarReg() && Op3.isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
      if (Op3CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
                Op2.getReg())) {
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
        } else {
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        }

        const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
        const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());

        Operands[0] = AArch64Operand::CreateToken(
            "ubfm", false, Op.getStartLoc(), getContext());
        Operands.push_back(AArch64Operand::CreateImm(
            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
                                                Op3.getEndLoc(), getContext());
      }
    }
  } else if (NumOperands == 4 && Tok == "bfc") {
    // FIXME: Horrible hack to handle BFC->BFM alias.
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);

    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());

      if (LSBCE && WidthCE) {
        uint64_t LSB = LSBCE->getValue();
        uint64_t Width = WidthCE->getValue();

        uint64_t RegWidth = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                Op1.getReg()))
          RegWidth = 64;
        else
          RegWidth = 32;

        if (LSB >= RegWidth)
          return Error(LSBOp.getStartLoc(),
                       "expected integer in range [0, 31]");
        if (Width < 1 || Width > RegWidth)
          return Error(WidthOp.getStartLoc(),
                       "expected integer in range [1, 32]");

        uint64_t ImmR = 0;
        if (RegWidth == 32)
          ImmR = (32 - LSB) & 0x1f;
        else
          ImmR = (64 - LSB) & 0x3f;

        uint64_t ImmS = Width - 1;

        if (ImmR != 0 && ImmS >= ImmR)
          return Error(WidthOp.getStartLoc(),
                       "requested insert overflows register");

        const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
        const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
        Operands[0] = AArch64Operand::CreateToken(
            "bfm", false, Op.getStartLoc(), getContext());
        Operands[2] = AArch64Operand::CreateReg(
            RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
            SMLoc(), SMLoc(), getContext());
        Operands[3] = AArch64Operand::CreateImm(
            ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
        Operands.emplace_back(
            AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
                                      WidthOp.getEndLoc(), getContext()));
      }
    }
  }
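  // e.g. (illustrative): "bfc w0, #3, #4" is rewritten above to
  // "bfm w0, wzr, #29, #3".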
  else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
    // UBFIZ -> UBFM aliases.
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          uint64_t NewOp3Val = 0;
          if (RegWidth == 32)
            NewOp3Val = (32 - Op3Val) & 0x1f;
          else
            NewOp3Val = (64 - Op3Val) & 0x3f;

          uint64_t NewOp4Val = Op4Val - 1;

          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4.getStartLoc(),
                         "requested insert overflows register");

          const MCExpr *NewOp3 =
              MCConstantExpr::create(NewOp3Val, getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::create(NewOp4Val, getContext());
          Operands[3] = AArch64Operand::CreateImm(
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfi")
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }
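      // e.g. (illustrative): "bfi x0, x1, #8, #16" is rewritten above to
      // "bfm x0, x1, #56, #15".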

      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
      // UBFX -> UBFM aliases.
    } else if (NumOperands == 5 &&
               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);

      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());

        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();

          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  Op1.getReg()))
            RegWidth = 64;
          else
            RegWidth = 32;

          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");

          uint64_t NewOp4Val = Op3Val + Op4Val - 1;

          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
            return Error(Op4.getStartLoc(),
                         "requested extract overflows register");

          const MCExpr *NewOp4 =
              MCConstantExpr::create(NewOp4Val, getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfxil")
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfx")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfx")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
          else
            llvm_unreachable("No valid mnemonic for alias?");
        }
      }
    }
  }

  // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
  // instruction for FP registers correctly in some rare circumstances. Convert
  // it to a safe instruction and warn (because silently changing someone's
  // assembly is rude).
  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
      NumOperands == 4 && Tok == "movi") {
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
        (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
      StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
      if (Suffix.lower() == ".2d" &&
          cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
        Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
                       " correctly on this CPU, converting to equivalent movi.16b");
        // Switch the suffix to .16b.
        unsigned Idx = Op1.isToken() ? 1 : 2;
        Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
                                                    getContext());
      }
    }
  }
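
  // e.g. on affected CPUs (illustrative): "movi v0.2d, #0" is emitted above
  // as "movi v0.16b, #0".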
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
    if (Op.isScalarReg()) {
      unsigned Reg = getXRegFromWReg(Op.getReg());
      Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                              Op.getStartLoc(), Op.getEndLoc(),
                                              getContext());
    }
  }
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      if (Op.isScalarReg()) {
        unsigned Reg = getXRegFromWReg(Op.getReg());
        Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                                Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
      }
    }
  }
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Op.getReg())) {
      // The destination register can be Xn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
      if (Op.isScalarReg()) {
        unsigned Reg = getWRegFromXReg(Op.getReg());
        Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                                Op.getStartLoc(),
                                                Op.getEndLoc(), getContext());
      }
    }
  }

  MCInst Inst;
  FeatureBitset MissingFeatures;
  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                           MatchingInlineAsm, 1);

  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success) {
    // But first, save the short-form match result: we can use it in case the
    // long-form match also fails.
    auto ShortFormNEONErrorInfo = ErrorInfo;
    auto ShortFormNEONMatchResult = MatchResult;
    auto ShortFormNEONMissingFeatures = MissingFeatures;

    MatchResult =
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                             MatchingInlineAsm, 0);

    // Now, both matches failed, and the long-form match failed on the mnemonic
    // suffix token operand. The short-form match failure is probably more
    // relevant: use it instead.
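    // (ErrorInfo == 1 below refers to Operands[1]; when the user wrote
    // short-form syntax such as "fadd.2s", that operand is the mnemonic's
    // type-suffix token.)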
    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
      MatchResult = ShortFormNEONMatchResult;
      ErrorInfo = ShortFormNEONErrorInfo;
      MissingFeatures = ShortFormNEONMissingFeatures;
    }
  }

  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations.
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, IDLoc, OperandLocs))
      return true;

    Inst.setLoc(IDLoc);
    Out.emitInstruction(Inst, getSTI());
    return false;
  }
  case Match_MissingFeature: {
    assert(MissingFeatures.any() && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (e.g. NEON).
    std::string Msg = "instruction requires:";
    for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
      if (MissingFeatures[i]) {
        Msg += " ";
        Msg += getSubtargetFeatureName(i);
      }
    }
    return Error(IDLoc, Msg);
  }
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;

    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction",
                     SMRange(IDLoc, getTok().getLoc()));

      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    // If the match failed on a suffix token operand, tweak the diagnostic
    // accordingly.
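    // (This turns the generic invalid-operand diagnostic into the
    // suffix-specific one produced by showMatchError.)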
    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
      MatchResult = Match_InvalidSuffix;

    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
  }
  case Match_InvalidTiedOperand:
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed1SImm4:
  case Match_InvalidMemoryIndexed2SImm4:
  case Match_InvalidMemoryIndexed3SImm4:
  case Match_InvalidMemoryIndexed4SImm4:
  case Match_InvalidMemoryIndexed1SImm6:
  case Match_InvalidMemoryIndexed16SImm4:
  case Match_InvalidMemoryIndexed32SImm4:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexed8UImm5:
  case Match_InvalidMemoryIndexed4UImm5:
  case Match_InvalidMemoryIndexed2UImm5:
  case Match_InvalidMemoryIndexed1UImm6:
  case Match_InvalidMemoryIndexed2UImm6:
  case Match_InvalidMemoryIndexed4UImm6:
  case Match_InvalidMemoryIndexed8UImm6:
  case Match_InvalidMemoryIndexed16UImm6:
  case Match_InvalidMemoryIndexedSImm6:
  case Match_InvalidMemoryIndexedSImm5:
  case Match_InvalidMemoryIndexedSImm8:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidMemoryIndexed16SImm9:
  case Match_InvalidMemoryIndexed8SImm10:
  case Match_InvalidImm0_1:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_255:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidSVEAddSubImm8:
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
  case Match_InvalidSVECpyImm8:
  case Match_InvalidSVECpyImm16:
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
  case Match_InvalidIndexRange1_1:
  case Match_InvalidIndexRange0_15:
  case Match_InvalidIndexRange0_7:
  case Match_InvalidIndexRange0_3:
  case Match_InvalidIndexRange0_1:
  case Match_InvalidSVEIndexRange0_63:
  case Match_InvalidSVEIndexRange0_31:
  case Match_InvalidSVEIndexRange0_15:
  case Match_InvalidSVEIndexRange0_7:
  case Match_InvalidSVEIndexRange0_3:
  case Match_InvalidLabel:
  case Match_InvalidComplexRotationEven:
  case Match_InvalidComplexRotationOdd:
  case Match_InvalidGPR64shifted8:
  case Match_InvalidGPR64shifted16:
  case Match_InvalidGPR64shifted32:
  case Match_InvalidGPR64shifted64:
  case Match_InvalidGPR64NoXZRshifted8:
  case Match_InvalidGPR64NoXZRshifted16:
  case Match_InvalidGPR64NoXZRshifted32:
  case Match_InvalidGPR64NoXZRshifted64:
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW8:
  case Match_InvalidZPR32SXTW16:
  case Match_InvalidZPR32SXTW32:
  case Match_InvalidZPR32SXTW64:
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
  case Match_InvalidZPR32LSL8:
  case Match_InvalidZPR32LSL16:
  case Match_InvalidZPR32LSL32:
  case Match_InvalidZPR32LSL64:
  case Match_InvalidZPR64LSL8:
  case Match_InvalidZPR64LSL16:
  case Match_InvalidZPR64LSL32:
  case Match_InvalidZPR64LSL64:
  case Match_InvalidZPR0:
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
  case Match_InvalidZPR_3b8:
  case Match_InvalidZPR_3b16:
  case Match_InvalidZPR_3b32:
  case Match_InvalidZPR_4b16:
  case Match_InvalidZPR_4b32:
  case Match_InvalidZPR_4b64:
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPattern:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
  case Match_InvalidSVEPredicate3bAnyReg:
  case Match_InvalidSVEPredicate3bBReg:
  case Match_InvalidSVEPredicate3bHReg:
  case Match_InvalidSVEPredicate3bSReg:
  case Match_InvalidSVEPredicate3bDReg:
  case Match_InvalidSVEExactFPImmOperandHalfOne:
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
  case Match_InvalidSVEExactFPImmOperandZeroOne:
  case Match_MSR:
  case Match_MRS: {
    if (ErrorInfo >= Operands.size())
      return Error(IDLoc, "too few operands for instruction",
                   SMRange(IDLoc, (*Operands.back()).getEndLoc()));
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
  }
  }

  llvm_unreachable("Implement any new match types added!");
}

/// ParseDirective parses the AArch64-specific directives.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCObjectFileInfo::Environment Format =
      getContext().getObjectFileInfo()->getObjectFileType();
  bool IsMachO = Format == MCObjectFileInfo::IsMachO;

  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IsMachO) {
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  } else
    return true;
  return false;
}

static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
                            SmallVector<StringRef, 4> &RequestedExtensions) {
  const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
  const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");

  if (!NoCrypto && Crypto) {
    switch (ArchKind) {
    default:
      // Map 'generic' (and others) to sha2 and aes, because
      // that was the traditional meaning of crypto.
    case AArch64::ArchKind::ARMV8_1A:
    case AArch64::ArchKind::ARMV8_2A:
    case AArch64::ArchKind::ARMV8_3A:
      RequestedExtensions.push_back("sha2");
      RequestedExtensions.push_back("aes");
      break;
    case AArch64::ArchKind::ARMV8_4A:
    case AArch64::ArchKind::ARMV8_5A:
    case AArch64::ArchKind::ARMV8_6A:
      RequestedExtensions.push_back("sm4");
      RequestedExtensions.push_back("sha3");
      RequestedExtensions.push_back("sha2");
      RequestedExtensions.push_back("aes");
      break;
    }
  } else if (NoCrypto) {
    switch (ArchKind) {
    default:
      // Map 'generic' (and others) to nosha2 and noaes, matching the
      // traditional meaning of crypto.
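      // e.g. ".arch armv8.2-a+nocrypto" (illustrative) ends up requesting
      // "nosha2" and "noaes" here.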
    case AArch64::ArchKind::ARMV8_1A:
    case AArch64::ArchKind::ARMV8_2A:
    case AArch64::ArchKind::ARMV8_3A:
      RequestedExtensions.push_back("nosha2");
      RequestedExtensions.push_back("noaes");
      break;
    case AArch64::ArchKind::ARMV8_4A:
    case AArch64::ArchKind::ARMV8_5A:
    case AArch64::ArchKind::ARMV8_6A:
      RequestedExtensions.push_back("nosm4");
      RequestedExtensions.push_back("nosha3");
      RequestedExtensions.push_back("nosha2");
      RequestedExtensions.push_back("noaes");
      break;
    }
  }
}

/// parseDirectiveArch
///   ::= .arch token
bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
  SMLoc ArchLoc = getLoc();

  StringRef Arch, ExtensionString;
  std::tie(Arch, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');

  AArch64::ArchKind ID = AArch64::parseArch(Arch);
  if (ID == AArch64::ArchKind::INVALID)
    return Error(ArchLoc, "unknown arch name");

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  // Get the architecture and extension features.
  std::vector<StringRef> AArch64Features;
  AArch64::getArchFeatures(ID, AArch64Features);
  AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
                                AArch64Features);

  MCSubtargetInfo &STI = copySTI();
  std::vector<std::string> ArchFeatures(AArch64Features.begin(),
                                        AArch64Features.end());
  STI.setDefaultFeatures("generic",
                         join(ArchFeatures.begin(), ArchFeatures.end(), ","));

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  ExpandCryptoAEK(ID, RequestedExtensions);

  FeatureBitset Features = STI.getFeatureBits();
  for (auto Name : RequestedExtensions) {
    bool EnableFeature = true;

    if (Name.startswith_lower("no")) {
      EnableFeature = false;
      Name = Name.substr(2);
    }

    for (const auto &Extension : ExtensionMap) {
      if (Extension.Name != Name)
        continue;

      if (Extension.Features.none())
        report_fatal_error("unsupported architectural extension: " + Name);

      FeatureBitset ToggleFeatures = EnableFeature
                                         ? (~Features & Extension.Features)
                                         : (Features & Extension.Features);
      FeatureBitset Features =
          ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
      setAvailableFeatures(Features);
      break;
    }
  }
  return false;
}

/// parseDirectiveArchExtension
///   ::= .arch_extension [no]feature
bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
  SMLoc ExtLoc = getLoc();

  StringRef Name = getParser().parseStringToEndOfStatement().trim();

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.arch_extension' directive"))
    return true;

  bool EnableFeature = true;
  if (Name.startswith_lower("no")) {
    EnableFeature = false;
    Name = Name.substr(2);
  }

  MCSubtargetInfo &STI = copySTI();
  FeatureBitset Features = STI.getFeatureBits();
  for (const auto &Extension : ExtensionMap) {
    if (Extension.Name != Name)
      continue;

    if (Extension.Features.none())
      return Error(ExtLoc, "unsupported architectural extension: " + Name);

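    // Toggle only the bits that actually change: when enabling, the
    // extension's feature bits that are currently clear; when disabling,
    // the ones that are currently set.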
    FeatureBitset ToggleFeatures = EnableFeature
                                       ? (~Features & Extension.Features)
                                       : (Features & Extension.Features);
    FeatureBitset Features =
        ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
    setAvailableFeatures(Features);
    return false;
  }

  return Error(ExtLoc, "unknown architectural extension: " + Name);
}

static SMLoc incrementLoc(SMLoc L, int Offset) {
  return SMLoc::getFromPointer(L.getPointer() + Offset);
}

/// parseDirectiveCPU
///   ::= .cpu id
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  StringRef CPU, ExtensionString;
  std::tie(CPU, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  // FIXME: This is using tablegen data, but should be moved to
  // ARMTargetParser once that is tablegen'ed.
  if (!getSTI().isCPUStringValid(CPU)) {
    Error(CurLoc, "unknown CPU name");
    return false;
  }

  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, "");
  CurLoc = incrementLoc(CurLoc, CPU.size());

  ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);

  FeatureBitset Features = STI.getFeatureBits();
  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(CurLoc, 1);

    bool EnableFeature = true;

    if (Name.startswith_lower("no")) {
      EnableFeature = false;
      Name = Name.substr(2);
    }

    bool FoundExtension = false;
    for (const auto &Extension : ExtensionMap) {
      if (Extension.Name != Name)
        continue;

      if (Extension.Features.none())
        report_fatal_error("unsupported architectural extension: " + Name);

      FeatureBitset ToggleFeatures = EnableFeature
                                         ? (~Features & Extension.Features)
                                         : (Features & Extension.Features);
      FeatureBitset Features =
          ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
      setAvailableFeatures(Features);
      FoundExtension = true;

      break;
    }

    if (!FoundExtension)
      Error(CurLoc, "unsupported architectural extension");

    CurLoc = incrementLoc(CurLoc, Name.size());
  }
  return false;
}

/// parseDirectiveInst
///   ::= .inst opcode [, ...]
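/// e.g. ".inst 0xd503201f" (illustratively, the encoding of a NOP) emits
/// that 32-bit word into the instruction stream.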
bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
  if (getLexer().is(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following '.inst' directive");

  auto parseOp = [&]() -> bool {
    SMLoc L = getLoc();
    const MCExpr *Expr = nullptr;
    if (check(getParser().parseExpression(Expr), L, "expected expression"))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (check(!Value, L, "expected constant expression"))
      return true;
    getTargetStreamer().emitInst(Value->getValue());
    return false;
  };

  if (parseMany(parseOp))
    return addErrorSuffix(" in '.inst' directive");
  return false;
}

// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (check(getParser().parseIdentifier(Name), L,
            "expected symbol after directive") ||
      parseToken(AsmToken::EndOfStatement))
    return true;

  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::createExpr(Expr));

  getParser().getStreamer().emitInstruction(Inst, getSTI());
  return false;
}

/// parseDirectiveLOH
///   ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  MCLOHType Kind;
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully got a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parsed an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
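  // e.g. an AdrpLdr kind (illustrative) describes an ADRP/LDR pair and so
  // takes exactly two label arguments.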
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));

    if (Idx + 1 == NbArgs)
      break;
    if (parseToken(AsmToken::Comma,
                   "unexpected token in '" + Twine(IDVal) + "' directive"))
      return true;
  }
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '" + Twine(IDVal) + "' directive"))
    return true;

  getStreamer().emitLOHDirective(Kind, Args);
  return false;
}

/// parseDirectiveLtorg
///   ::= .ltorg | .pool
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}

/// parseDirectiveReq
///   ::= name .req registername
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  RegKind RegisterKind = RegKind::Scalar;
  unsigned RegNum;
  OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);

  if (ParseRes != MatchOperand_Success) {
    StringRef Kind;
    RegisterKind = RegKind::NeonVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);

    if (ParseRes == MatchOperand_ParseFail)
      return true;

    if (ParseRes == MatchOperand_Success && !Kind.empty())
      return Error(SRegLoc, "vector register without type specifier expected");
  }

  if (ParseRes != MatchOperand_Success) {
    StringRef Kind;
    RegisterKind = RegKind::SVEDataVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

    if (ParseRes == MatchOperand_ParseFail)
      return true;

    if (ParseRes == MatchOperand_Success && !Kind.empty())
      return Error(SRegLoc,
                   "sve vector register without type specifier expected");
  }

  if (ParseRes != MatchOperand_Success) {
    StringRef Kind;
    RegisterKind = RegKind::SVEPredicateVector;
    ParseRes =
        tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);

    if (ParseRes == MatchOperand_ParseFail)
      return true;

    if (ParseRes == MatchOperand_Success && !Kind.empty())
      return Error(SRegLoc,
                   "sve predicate register without type specifier expected");
  }

  if (ParseRes != MatchOperand_Success)
    return Error(SRegLoc, "register name or alias expected");

  // Shouldn't be anything else.
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected input in .req directive"))
    return true;

  auto pair = std::make_pair(RegisterKind, (unsigned)RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");

  return false;
}

/// parseDirectiveUnreq
///   ::= .unreq registername
bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (getTok().isNot(AsmToken::Identifier))
    return TokError("unexpected input in .unreq directive.");
  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
  Parser.Lex(); // Eat the identifier.
  if (parseToken(AsmToken::EndOfStatement))
    return addErrorSuffix(" in '.unreq' directive");
  return false;
}

bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  getStreamer().emitCFINegateRAState();
  return false;
}

/// parseDirectiveCFIBKeyFrame
///   ::= .cfi_b_key_frame
bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.cfi_b_key_frame'"))
    return true;
  getStreamer().emitCFIBKeyFrame();
  return false;
}

bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend.
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}

/// Force static initialization.
5553 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() { 5554 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget()); 5555 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget()); 5556 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target()); 5557 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target()); 5558 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target()); 5559 } 5560 5561 #define GET_REGISTER_MATCHER 5562 #define GET_SUBTARGET_FEATURE_NAME 5563 #define GET_MATCHER_IMPLEMENTATION 5564 #define GET_MNEMONIC_SPELL_CHECKER 5565 #include "AArch64GenAsmMatcher.inc" 5566 5567 // Define this matcher function after the auto-generated include so we 5568 // have the match class enum definitions. 5569 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, 5570 unsigned Kind) { 5571 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp); 5572 // If the kind is a token for a literal immediate, check if our asm 5573 // operand matches. This is for InstAliases which have a fixed-value 5574 // immediate in the syntax. 5575 int64_t ExpectedVal; 5576 switch (Kind) { 5577 default: 5578 return Match_InvalidOperand; 5579 case MCK__HASH_0: 5580 ExpectedVal = 0; 5581 break; 5582 case MCK__HASH_1: 5583 ExpectedVal = 1; 5584 break; 5585 case MCK__HASH_12: 5586 ExpectedVal = 12; 5587 break; 5588 case MCK__HASH_16: 5589 ExpectedVal = 16; 5590 break; 5591 case MCK__HASH_2: 5592 ExpectedVal = 2; 5593 break; 5594 case MCK__HASH_24: 5595 ExpectedVal = 24; 5596 break; 5597 case MCK__HASH_3: 5598 ExpectedVal = 3; 5599 break; 5600 case MCK__HASH_32: 5601 ExpectedVal = 32; 5602 break; 5603 case MCK__HASH_4: 5604 ExpectedVal = 4; 5605 break; 5606 case MCK__HASH_48: 5607 ExpectedVal = 48; 5608 break; 5609 case MCK__HASH_6: 5610 ExpectedVal = 6; 5611 break; 5612 case MCK__HASH_64: 5613 ExpectedVal = 64; 5614 break; 5615 case MCK__HASH_8: 5616 ExpectedVal = 8; 5617 break; 5618 } 5619 if (!Op.isImm()) 5620 return Match_InvalidOperand; 5621 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()); 5622 if (!CE) 5623 return Match_InvalidOperand; 5624 if (CE->getValue() == ExpectedVal) 5625 return Match_Success; 5626 return Match_InvalidOperand; 5627 } 5628 5629 OperandMatchResultTy 5630 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) { 5631 5632 SMLoc S = getLoc(); 5633 5634 if (getParser().getTok().isNot(AsmToken::Identifier)) { 5635 Error(S, "expected register"); 5636 return MatchOperand_ParseFail; 5637 } 5638 5639 unsigned FirstReg; 5640 OperandMatchResultTy Res = tryParseScalarRegister(FirstReg); 5641 if (Res != MatchOperand_Success) 5642 return MatchOperand_ParseFail; 5643 5644 const MCRegisterClass &WRegClass = 5645 AArch64MCRegisterClasses[AArch64::GPR32RegClassID]; 5646 const MCRegisterClass &XRegClass = 5647 AArch64MCRegisterClasses[AArch64::GPR64RegClassID]; 5648 5649 bool isXReg = XRegClass.contains(FirstReg), 5650 isWReg = WRegClass.contains(FirstReg); 5651 if (!isXReg && !isWReg) { 5652 Error(S, "expected first even register of a " 5653 "consecutive same-size even/odd register pair"); 5654 return MatchOperand_ParseFail; 5655 } 5656 5657 const MCRegisterInfo *RI = getContext().getRegisterInfo(); 5658 unsigned FirstEncoding = RI->getEncodingValue(FirstReg); 5659 5660 if (FirstEncoding & 0x1) { 5661 Error(S, "expected first even register of a " 5662 "consecutive same-size even/odd register pair"); 5663 return MatchOperand_ParseFail; 5664 } 5665 5666 if (getParser().getTok().isNot(AsmToken::Comma)) { 
  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma.
  getParser().Lex();

  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E, "expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(
        FirstReg, AArch64::sube64,
        &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(
        FirstReg, AArch64::sube32,
        &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
                                               getLoc(), getContext()));

  return MatchOperand_Success;
}

template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  unsigned RegNum;
  StringRef Kind;

  OperandMatchResultTy Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

  if (Res != MatchOperand_Success)
    return Res;

  if (ParseSuffix && Kind.empty())
    return MatchOperand_NoMatch;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
    if (Res == MatchOperand_ParseFail)
      return MatchOperand_ParseFail;
    return MatchOperand_Success;
  }

  // Eat the comma.
  getParser().Lex();

  // Match the shift.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}

OperandMatchResultTy
AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = Parser.getTok();
  bool IsHash = TokE.is(AsmToken::Hash);

  if (!IsHash && TokE.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Parser.Lex(); // Eat the hash.

    // Parse the immediate operand.
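    // e.g. the "#31" in "ptrue p0.s, #31" (illustratively, the raw encoding
    // of the "all" pattern).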
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return MatchOperand_ParseFail;

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern name.
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
    if (!Pat)
      return MatchOperand_NoMatch;

    Parser.Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));

  return MatchOperand_Success;
}