xref: /freebsd/contrib/llvm-project/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMBaseInstrInfo.h"
10 #include "ARMFeatures.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMInstPrinter.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "TargetInfo/ARMTargetInfo.h"
17 #include "Utils/ARMBaseInfo.h"
18 #include "llvm/ADT/APFloat.h"
19 #include "llvm/ADT/APInt.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSet.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/MC/MCContext.h"
30 #include "llvm/MC/MCExpr.h"
31 #include "llvm/MC/MCInst.h"
32 #include "llvm/MC/MCInstrDesc.h"
33 #include "llvm/MC/MCInstrInfo.h"
34 #include "llvm/MC/MCParser/MCAsmLexer.h"
35 #include "llvm/MC/MCParser/MCAsmParser.h"
36 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
37 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
38 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
39 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
40 #include "llvm/MC/MCRegisterInfo.h"
41 #include "llvm/MC/MCSection.h"
42 #include "llvm/MC/MCStreamer.h"
43 #include "llvm/MC/MCSubtargetInfo.h"
44 #include "llvm/MC/MCSymbol.h"
45 #include "llvm/MC/TargetRegistry.h"
46 #include "llvm/Support/ARMBuildAttributes.h"
47 #include "llvm/Support/ARMEHABI.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/ErrorHandling.h"
53 #include "llvm/Support/MathExtras.h"
54 #include "llvm/Support/SMLoc.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include "llvm/TargetParser/SubtargetFeature.h"
57 #include "llvm/TargetParser/TargetParser.h"
58 #include "llvm/TargetParser/Triple.h"
59 #include <algorithm>
60 #include <cassert>
61 #include <cstddef>
62 #include <cstdint>
63 #include <iterator>
64 #include <limits>
65 #include <memory>
66 #include <optional>
67 #include <string>
68 #include <utility>
69 #include <vector>
70 
71 #define DEBUG_TYPE "asm-parser"
72 
73 using namespace llvm;
74 
75 namespace {
76 class ARMOperand;
77 
78 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
79 
80 static cl::opt<ImplicitItModeTy> ImplicitItMode(
81     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
82     cl::desc("Allow conditional instructions outdside of an IT block"),
83     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
84                           "Accept in both ISAs, emit implicit ITs in Thumb"),
85                clEnumValN(ImplicitItModeTy::Never, "never",
86                           "Warn in ARM, reject in Thumb"),
87                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
88                           "Accept in ARM, reject in Thumb"),
89                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
90                           "Warn in ARM, emit implicit ITs in Thumb")));
91 
92 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
93                                         cl::init(false));
94 
95 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
96 
extractITMaskBit(unsigned Mask,unsigned Position)97 static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
98   // Position==0 means we're not in an IT block at all. Position==1
99   // means we want the first state bit, which is always 0 (Then).
100   // Position==2 means we want the second state bit, stored at bit 3
101   // of Mask, and so on downwards. So (5 - Position) will shift the
102   // right bit down to bit 0, including the always-0 bit at bit 4 for
103   // the mandatory initial Then.
104   return (Mask >> (5 - Position) & 1);
105 }
106 
107 class UnwindContext {
108   using Locs = SmallVector<SMLoc, 4>;
109 
110   MCAsmParser &Parser;
111   Locs FnStartLocs;
112   Locs CantUnwindLocs;
113   Locs PersonalityLocs;
114   Locs PersonalityIndexLocs;
115   Locs HandlerDataLocs;
116   int FPReg;
117 
118 public:
UnwindContext(MCAsmParser & P)119   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
120 
hasFnStart() const121   bool hasFnStart() const { return !FnStartLocs.empty(); }
cantUnwind() const122   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
hasHandlerData() const123   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
124 
hasPersonality() const125   bool hasPersonality() const {
126     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
127   }
128 
recordFnStart(SMLoc L)129   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
recordCantUnwind(SMLoc L)130   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
recordPersonality(SMLoc L)131   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
recordHandlerData(SMLoc L)132   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
recordPersonalityIndex(SMLoc L)133   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
134 
saveFPReg(int Reg)135   void saveFPReg(int Reg) { FPReg = Reg; }
getFPReg() const136   int getFPReg() const { return FPReg; }
137 
emitFnStartLocNotes() const138   void emitFnStartLocNotes() const {
139     for (const SMLoc &Loc : FnStartLocs)
140       Parser.Note(Loc, ".fnstart was specified here");
141   }
142 
emitCantUnwindLocNotes() const143   void emitCantUnwindLocNotes() const {
144     for (const SMLoc &Loc : CantUnwindLocs)
145       Parser.Note(Loc, ".cantunwind was specified here");
146   }
147 
emitHandlerDataLocNotes() const148   void emitHandlerDataLocNotes() const {
149     for (const SMLoc &Loc : HandlerDataLocs)
150       Parser.Note(Loc, ".handlerdata was specified here");
151   }
152 
emitPersonalityLocNotes() const153   void emitPersonalityLocNotes() const {
154     for (Locs::const_iterator PI = PersonalityLocs.begin(),
155                               PE = PersonalityLocs.end(),
156                               PII = PersonalityIndexLocs.begin(),
157                               PIE = PersonalityIndexLocs.end();
158          PI != PE || PII != PIE;) {
159       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
160         Parser.Note(*PI++, ".personality was specified here");
161       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
162         Parser.Note(*PII++, ".personalityindex was specified here");
163       else
164         llvm_unreachable(".personality and .personalityindex cannot be "
165                          "at the same location");
166     }
167   }
168 
reset()169   void reset() {
170     FnStartLocs = Locs();
171     CantUnwindLocs = Locs();
172     PersonalityLocs = Locs();
173     HandlerDataLocs = Locs();
174     PersonalityIndexLocs = Locs();
175     FPReg = ARM::SP;
176   }
177 };
178 
179 // Various sets of ARM instruction mnemonics which are used by the asm parser
180 class ARMMnemonicSets {
181   StringSet<> CDE;
182   StringSet<> CDEWithVPTSuffix;
183 public:
184   ARMMnemonicSets(const MCSubtargetInfo &STI);
185 
186   /// Returns true iff a given mnemonic is a CDE instruction
isCDEInstr(StringRef Mnemonic)187   bool isCDEInstr(StringRef Mnemonic) {
188     // Quick check before searching the set
189     if (!Mnemonic.starts_with("cx") && !Mnemonic.starts_with("vcx"))
190       return false;
191     return CDE.count(Mnemonic);
192   }
193 
194   /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
195   /// (possibly with a predication suffix "e" or "t")
isVPTPredicableCDEInstr(StringRef Mnemonic)196   bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
197     if (!Mnemonic.starts_with("vcx"))
198       return false;
199     return CDEWithVPTSuffix.count(Mnemonic);
200   }
201 
202   /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
203   /// (possibly with a condition suffix)
isITPredicableCDEInstr(StringRef Mnemonic)204   bool isITPredicableCDEInstr(StringRef Mnemonic) {
205     if (!Mnemonic.starts_with("cx"))
206       return false;
207     return Mnemonic.starts_with("cx1a") || Mnemonic.starts_with("cx1da") ||
208            Mnemonic.starts_with("cx2a") || Mnemonic.starts_with("cx2da") ||
209            Mnemonic.starts_with("cx3a") || Mnemonic.starts_with("cx3da");
210   }
211 
212   /// Return true iff a given mnemonic is an integer CDE instruction with
213   /// dual-register destination
isCDEDualRegInstr(StringRef Mnemonic)214   bool isCDEDualRegInstr(StringRef Mnemonic) {
215     if (!Mnemonic.starts_with("cx"))
216       return false;
217     return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
218            Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
219            Mnemonic == "cx3d" || Mnemonic == "cx3da";
220   }
221 };
222 
ARMMnemonicSets(const MCSubtargetInfo & STI)223 ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
224   for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
225                              "cx2", "cx2a", "cx2d", "cx2da",
226                              "cx3", "cx3a", "cx3d", "cx3da", })
227     CDE.insert(Mnemonic);
228   for (StringRef Mnemonic :
229        {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
230     CDE.insert(Mnemonic);
231     CDEWithVPTSuffix.insert(Mnemonic);
232     CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
233     CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
234   }
235 }
236 
237 class ARMAsmParser : public MCTargetAsmParser {
238   const MCRegisterInfo *MRI;
239   UnwindContext UC;
240   ARMMnemonicSets MS;
241 
getTargetStreamer()242   ARMTargetStreamer &getTargetStreamer() {
243     assert(getParser().getStreamer().getTargetStreamer() &&
244            "do not have a target streamer");
245     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
246     return static_cast<ARMTargetStreamer &>(TS);
247   }
248 
249   // Map of register aliases registers via the .req directive.
250   StringMap<unsigned> RegisterReqs;
251 
252   bool NextSymbolIsThumb;
253 
useImplicitITThumb() const254   bool useImplicitITThumb() const {
255     return ImplicitItMode == ImplicitItModeTy::Always ||
256            ImplicitItMode == ImplicitItModeTy::ThumbOnly;
257   }
258 
useImplicitITARM() const259   bool useImplicitITARM() const {
260     return ImplicitItMode == ImplicitItModeTy::Always ||
261            ImplicitItMode == ImplicitItModeTy::ARMOnly;
262   }
263 
264   struct {
265     ARMCC::CondCodes Cond;    // Condition for IT block.
266     unsigned Mask:4;          // Condition mask for instructions.
267                               // Starting at first 1 (from lsb).
268                               //   '1'  condition as indicated in IT.
269                               //   '0'  inverse of condition (else).
270                               // Count of instructions in IT block is
271                               // 4 - trailingzeroes(mask)
272                               // Note that this does not have the same encoding
273                               // as in the IT instruction, which also depends
274                               // on the low bit of the condition code.
275 
276     unsigned CurPosition;     // Current position in parsing of IT
277                               // block. In range [0,4], with 0 being the IT
278                               // instruction itself. Initialized according to
279                               // count of instructions in block.  ~0U if no
280                               // active IT block.
281 
282     bool IsExplicit;          // true  - The IT instruction was present in the
283                               //         input, we should not modify it.
284                               // false - The IT instruction was added
285                               //         implicitly, we can extend it if that
286                               //         would be legal.
287   } ITState;
288 
289   SmallVector<MCInst, 4> PendingConditionalInsts;
290 
flushPendingInstructions(MCStreamer & Out)291   void flushPendingInstructions(MCStreamer &Out) override {
292     if (!inImplicitITBlock()) {
293       assert(PendingConditionalInsts.size() == 0);
294       return;
295     }
296 
297     // Emit the IT instruction
298     MCInst ITInst;
299     ITInst.setOpcode(ARM::t2IT);
300     ITInst.addOperand(MCOperand::createImm(ITState.Cond));
301     ITInst.addOperand(MCOperand::createImm(ITState.Mask));
302     Out.emitInstruction(ITInst, getSTI());
303 
304     // Emit the conditional instructions
305     assert(PendingConditionalInsts.size() <= 4);
306     for (const MCInst &Inst : PendingConditionalInsts) {
307       Out.emitInstruction(Inst, getSTI());
308     }
309     PendingConditionalInsts.clear();
310 
311     // Clear the IT state
312     ITState.Mask = 0;
313     ITState.CurPosition = ~0U;
314   }
315 
inITBlock()316   bool inITBlock() { return ITState.CurPosition != ~0U; }
inExplicitITBlock()317   bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
inImplicitITBlock()318   bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
319 
lastInITBlock()320   bool lastInITBlock() {
321     return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
322   }
323 
forwardITPosition()324   void forwardITPosition() {
325     if (!inITBlock()) return;
326     // Move to the next instruction in the IT block, if there is one. If not,
327     // mark the block as done, except for implicit IT blocks, which we leave
328     // open until we find an instruction that can't be added to it.
329     unsigned TZ = llvm::countr_zero(ITState.Mask);
330     if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
331       ITState.CurPosition = ~0U; // Done with the IT block after this.
332   }
333 
334   // Rewind the state of the current IT block, removing the last slot from it.
rewindImplicitITPosition()335   void rewindImplicitITPosition() {
336     assert(inImplicitITBlock());
337     assert(ITState.CurPosition > 1);
338     ITState.CurPosition--;
339     unsigned TZ = llvm::countr_zero(ITState.Mask);
340     unsigned NewMask = 0;
341     NewMask |= ITState.Mask & (0xC << TZ);
342     NewMask |= 0x2 << TZ;
343     ITState.Mask = NewMask;
344   }
345 
346   // Rewind the state of the current IT block, removing the last slot from it.
347   // If we were at the first slot, this closes the IT block.
discardImplicitITBlock()348   void discardImplicitITBlock() {
349     assert(inImplicitITBlock());
350     assert(ITState.CurPosition == 1);
351     ITState.CurPosition = ~0U;
352   }
353 
354   // Get the condition code corresponding to the current IT block slot.
currentITCond()355   ARMCC::CondCodes currentITCond() {
356     unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
357     return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
358   }
359 
360   // Invert the condition of the current IT block slot without changing any
361   // other slots in the same block.
invertCurrentITCondition()362   void invertCurrentITCondition() {
363     if (ITState.CurPosition == 1) {
364       ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
365     } else {
366       ITState.Mask ^= 1 << (5 - ITState.CurPosition);
367     }
368   }
369 
370   // Returns true if the current IT block is full (all 4 slots used).
isITBlockFull()371   bool isITBlockFull() {
372     return inITBlock() && (ITState.Mask & 1);
373   }
374 
375   // Extend the current implicit IT block to have one more slot with the given
376   // condition code.
extendImplicitITBlock(ARMCC::CondCodes Cond)377   void extendImplicitITBlock(ARMCC::CondCodes Cond) {
378     assert(inImplicitITBlock());
379     assert(!isITBlockFull());
380     assert(Cond == ITState.Cond ||
381            Cond == ARMCC::getOppositeCondition(ITState.Cond));
382     unsigned TZ = llvm::countr_zero(ITState.Mask);
383     unsigned NewMask = 0;
384     // Keep any existing condition bits.
385     NewMask |= ITState.Mask & (0xE << TZ);
386     // Insert the new condition bit.
387     NewMask |= (Cond != ITState.Cond) << TZ;
388     // Move the trailing 1 down one bit.
389     NewMask |= 1 << (TZ - 1);
390     ITState.Mask = NewMask;
391   }
392 
393   // Create a new implicit IT block with a dummy condition code.
startImplicitITBlock()394   void startImplicitITBlock() {
395     assert(!inITBlock());
396     ITState.Cond = ARMCC::AL;
397     ITState.Mask = 8;
398     ITState.CurPosition = 1;
399     ITState.IsExplicit = false;
400   }
401 
402   // Create a new explicit IT block with the given condition and mask.
403   // The mask should be in the format used in ARMOperand and
404   // MCOperand, with a 1 implying 'e', regardless of the low bit of
405   // the condition.
startExplicitITBlock(ARMCC::CondCodes Cond,unsigned Mask)406   void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
407     assert(!inITBlock());
408     ITState.Cond = Cond;
409     ITState.Mask = Mask;
410     ITState.CurPosition = 0;
411     ITState.IsExplicit = true;
412   }
413 
414   struct {
415     unsigned Mask : 4;
416     unsigned CurPosition;
417   } VPTState;
inVPTBlock()418   bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
forwardVPTPosition()419   void forwardVPTPosition() {
420     if (!inVPTBlock()) return;
421     unsigned TZ = llvm::countr_zero(VPTState.Mask);
422     if (++VPTState.CurPosition == 5 - TZ)
423       VPTState.CurPosition = ~0U;
424   }
425 
Note(SMLoc L,const Twine & Msg,SMRange Range=std::nullopt)426   void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
427     return getParser().Note(L, Msg, Range);
428   }
429 
Warning(SMLoc L,const Twine & Msg,SMRange Range=std::nullopt)430   bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
431     return getParser().Warning(L, Msg, Range);
432   }
433 
Error(SMLoc L,const Twine & Msg,SMRange Range=std::nullopt)434   bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
435     return getParser().Error(L, Msg, Range);
436   }
437 
438   bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
439                            unsigned MnemonicOpsEndInd, unsigned ListIndex,
440                            bool IsARPop = false);
441   bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
442                            unsigned MnemonicOpsEndInd, unsigned ListIndex);
443 
444   int tryParseRegister(bool AllowOutofBoundReg = false);
445   bool tryParseRegisterWithWriteBack(OperandVector &);
446   int tryParseShiftRegister(OperandVector &);
447   std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
448   bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
449                          bool AllowRAAC = false,
450                          bool AllowOutOfBoundReg = false);
451   bool parseMemory(OperandVector &);
452   bool parseOperand(OperandVector &, StringRef Mnemonic);
453   bool parseImmExpr(int64_t &Out);
454   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
455   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
456                               unsigned &ShiftAmount);
457   bool parseLiteralValues(unsigned Size, SMLoc L);
458   bool parseDirectiveThumb(SMLoc L);
459   bool parseDirectiveARM(SMLoc L);
460   bool parseDirectiveThumbFunc(SMLoc L);
461   bool parseDirectiveCode(SMLoc L);
462   bool parseDirectiveSyntax(SMLoc L);
463   bool parseDirectiveReq(StringRef Name, SMLoc L);
464   bool parseDirectiveUnreq(SMLoc L);
465   bool parseDirectiveArch(SMLoc L);
466   bool parseDirectiveEabiAttr(SMLoc L);
467   bool parseDirectiveCPU(SMLoc L);
468   bool parseDirectiveFPU(SMLoc L);
469   bool parseDirectiveFnStart(SMLoc L);
470   bool parseDirectiveFnEnd(SMLoc L);
471   bool parseDirectiveCantUnwind(SMLoc L);
472   bool parseDirectivePersonality(SMLoc L);
473   bool parseDirectiveHandlerData(SMLoc L);
474   bool parseDirectiveSetFP(SMLoc L);
475   bool parseDirectivePad(SMLoc L);
476   bool parseDirectiveRegSave(SMLoc L, bool IsVector);
477   bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
478   bool parseDirectiveLtorg(SMLoc L);
479   bool parseDirectiveEven(SMLoc L);
480   bool parseDirectivePersonalityIndex(SMLoc L);
481   bool parseDirectiveUnwindRaw(SMLoc L);
482   bool parseDirectiveTLSDescSeq(SMLoc L);
483   bool parseDirectiveMovSP(SMLoc L);
484   bool parseDirectiveObjectArch(SMLoc L);
485   bool parseDirectiveArchExtension(SMLoc L);
486   bool parseDirectiveAlign(SMLoc L);
487   bool parseDirectiveThumbSet(SMLoc L);
488 
489   bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
490   bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
491   bool parseDirectiveSEHSaveSP(SMLoc L);
492   bool parseDirectiveSEHSaveFRegs(SMLoc L);
493   bool parseDirectiveSEHSaveLR(SMLoc L);
494   bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
495   bool parseDirectiveSEHNop(SMLoc L, bool Wide);
496   bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
497   bool parseDirectiveSEHEpilogEnd(SMLoc L);
498   bool parseDirectiveSEHCustom(SMLoc L);
499 
500   std::unique_ptr<ARMOperand> defaultCondCodeOp();
501   std::unique_ptr<ARMOperand> defaultCCOutOp();
502   std::unique_ptr<ARMOperand> defaultVPTPredOp();
503 
504   bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
505   StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
506                           ARMCC::CondCodes &PredicationCode,
507                           ARMVCC::VPTCodes &VPTPredicationCode,
508                           bool &CarrySetting, unsigned &ProcessorIMod,
509                           StringRef &ITMask);
510   void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
511                              StringRef FullInst, bool &CanAcceptCarrySet,
512                              bool &CanAcceptPredicationCode,
513                              bool &CanAcceptVPTPredicationCode);
514   bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
515 
516   void tryConvertingToTwoOperandForm(StringRef Mnemonic,
517                                      ARMCC::CondCodes PredicationCode,
518                                      bool CarrySetting, OperandVector &Operands,
519                                      unsigned MnemonicOpsEndInd);
520 
521   bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands,
522                                 unsigned MnemonicOpsEndInd);
523 
isThumb() const524   bool isThumb() const {
525     // FIXME: Can tablegen auto-generate this?
526     return getSTI().hasFeature(ARM::ModeThumb);
527   }
528 
isThumbOne() const529   bool isThumbOne() const {
530     return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
531   }
532 
isThumbTwo() const533   bool isThumbTwo() const {
534     return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
535   }
536 
hasThumb() const537   bool hasThumb() const {
538     return getSTI().hasFeature(ARM::HasV4TOps);
539   }
540 
hasThumb2() const541   bool hasThumb2() const {
542     return getSTI().hasFeature(ARM::FeatureThumb2);
543   }
544 
hasV6Ops() const545   bool hasV6Ops() const {
546     return getSTI().hasFeature(ARM::HasV6Ops);
547   }
548 
hasV6T2Ops() const549   bool hasV6T2Ops() const {
550     return getSTI().hasFeature(ARM::HasV6T2Ops);
551   }
552 
hasV6MOps() const553   bool hasV6MOps() const {
554     return getSTI().hasFeature(ARM::HasV6MOps);
555   }
556 
hasV7Ops() const557   bool hasV7Ops() const {
558     return getSTI().hasFeature(ARM::HasV7Ops);
559   }
560 
hasV8Ops() const561   bool hasV8Ops() const {
562     return getSTI().hasFeature(ARM::HasV8Ops);
563   }
564 
hasV8MBaseline() const565   bool hasV8MBaseline() const {
566     return getSTI().hasFeature(ARM::HasV8MBaselineOps);
567   }
568 
hasV8MMainline() const569   bool hasV8MMainline() const {
570     return getSTI().hasFeature(ARM::HasV8MMainlineOps);
571   }
hasV8_1MMainline() const572   bool hasV8_1MMainline() const {
573     return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
574   }
hasMVEFloat() const575   bool hasMVEFloat() const {
576     return getSTI().hasFeature(ARM::HasMVEFloatOps);
577   }
hasCDE() const578   bool hasCDE() const {
579     return getSTI().hasFeature(ARM::HasCDEOps);
580   }
has8MSecExt() const581   bool has8MSecExt() const {
582     return getSTI().hasFeature(ARM::Feature8MSecExt);
583   }
584 
hasARM() const585   bool hasARM() const {
586     return !getSTI().hasFeature(ARM::FeatureNoARM);
587   }
588 
hasDSP() const589   bool hasDSP() const {
590     return getSTI().hasFeature(ARM::FeatureDSP);
591   }
592 
hasD32() const593   bool hasD32() const {
594     return getSTI().hasFeature(ARM::FeatureD32);
595   }
596 
hasV8_1aOps() const597   bool hasV8_1aOps() const {
598     return getSTI().hasFeature(ARM::HasV8_1aOps);
599   }
600 
hasRAS() const601   bool hasRAS() const {
602     return getSTI().hasFeature(ARM::FeatureRAS);
603   }
604 
SwitchMode()605   void SwitchMode() {
606     MCSubtargetInfo &STI = copySTI();
607     auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
608     setAvailableFeatures(FB);
609   }
610 
611   void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
612 
isMClass() const613   bool isMClass() const {
614     return getSTI().hasFeature(ARM::FeatureMClass);
615   }
616 
617   /// @name Auto-generated Match Functions
618   /// {
619 
620 #define GET_ASSEMBLER_HEADER
621 #include "ARMGenAsmMatcher.inc"
622 
623   /// }
624 
625   ParseStatus parseITCondCode(OperandVector &);
626   ParseStatus parseCoprocNumOperand(OperandVector &);
627   ParseStatus parseCoprocRegOperand(OperandVector &);
628   ParseStatus parseCoprocOptionOperand(OperandVector &);
629   ParseStatus parseMemBarrierOptOperand(OperandVector &);
630   ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
631   ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
632   ParseStatus parseProcIFlagsOperand(OperandVector &);
633   ParseStatus parseMSRMaskOperand(OperandVector &);
634   ParseStatus parseBankedRegOperand(OperandVector &);
635   ParseStatus parsePKHImm(OperandVector &O, ARM_AM::ShiftOpc, int Low,
636                           int High);
parsePKHLSLImm(OperandVector & O)637   ParseStatus parsePKHLSLImm(OperandVector &O) {
638     return parsePKHImm(O, ARM_AM::lsl, 0, 31);
639   }
parsePKHASRImm(OperandVector & O)640   ParseStatus parsePKHASRImm(OperandVector &O) {
641     return parsePKHImm(O, ARM_AM::asr, 1, 32);
642   }
643   ParseStatus parseSetEndImm(OperandVector &);
644   ParseStatus parseShifterImm(OperandVector &);
645   ParseStatus parseRotImm(OperandVector &);
646   ParseStatus parseModImm(OperandVector &);
647   ParseStatus parseBitfield(OperandVector &);
648   ParseStatus parsePostIdxReg(OperandVector &);
649   ParseStatus parseAM3Offset(OperandVector &);
650   ParseStatus parseFPImm(OperandVector &);
651   ParseStatus parseVectorList(OperandVector &);
652   ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
653                               SMLoc &EndLoc);
654 
655   // Asm Match Converter Methods
656   void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
657   void cvtThumbBranches(MCInst &Inst, const OperandVector &);
658   void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
659 
660   bool validateInstruction(MCInst &Inst, const OperandVector &Ops,
661                            unsigned MnemonicOpsEndInd);
662   bool processInstruction(MCInst &Inst, const OperandVector &Ops,
663                           unsigned MnemonicOpsEndInd, MCStreamer &Out);
664   bool shouldOmitVectorPredicateOperand(StringRef Mnemonic,
665                                         OperandVector &Operands,
666                                         unsigned MnemonicOpsEndInd);
667   bool isITBlockTerminator(MCInst &Inst) const;
668 
669   void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands,
670                          unsigned MnemonicOpsEndInd);
671   bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands, bool Load,
672                         bool ARMMode, bool Writeback,
673                         unsigned MnemonicOpsEndInd);
674 
675 public:
676   enum ARMMatchResultTy {
677     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
678     Match_RequiresNotITBlock,
679     Match_RequiresV6,
680     Match_RequiresThumb2,
681     Match_RequiresV8,
682     Match_RequiresFlagSetting,
683 #define GET_OPERAND_DIAGNOSTIC_TYPES
684 #include "ARMGenAsmMatcher.inc"
685 
686   };
687 
ARMAsmParser(const MCSubtargetInfo & STI,MCAsmParser & Parser,const MCInstrInfo & MII,const MCTargetOptions & Options)688   ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
689                const MCInstrInfo &MII, const MCTargetOptions &Options)
690     : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
691     MCAsmParserExtension::Initialize(Parser);
692 
693     // Cache the MCRegisterInfo.
694     MRI = getContext().getRegisterInfo();
695 
696     // Initialize the set of available features.
697     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
698 
699     // Add build attributes based on the selected target.
700     if (AddBuildAttributes)
701       getTargetStreamer().emitTargetAttributes(STI);
702 
703     // Not in an ITBlock to start with.
704     ITState.CurPosition = ~0U;
705 
706     VPTState.CurPosition = ~0U;
707 
708     NextSymbolIsThumb = false;
709   }
710 
711   // Implementation of the MCTargetAsmParser interface:
712   bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
713   ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
714                                SMLoc &EndLoc) override;
715   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
716                         SMLoc NameLoc, OperandVector &Operands) override;
717   bool ParseDirective(AsmToken DirectiveID) override;
718 
719   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
720                                       unsigned Kind) override;
721   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
722   unsigned
723   checkEarlyTargetMatchPredicate(MCInst &Inst,
724                                  const OperandVector &Operands) override;
725 
726   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
727                                OperandVector &Operands, MCStreamer &Out,
728                                uint64_t &ErrorInfo,
729                                bool MatchingInlineAsm) override;
730   unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
731                             SmallVectorImpl<NearMissInfo> &NearMisses,
732                             bool MatchingInlineAsm, bool &EmitInITBlock,
733                             MCStreamer &Out);
734 
735   struct NearMissMessage {
736     SMLoc Loc;
737     SmallString<128> Message;
738   };
739 
740   const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
741 
742   void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
743                         SmallVectorImpl<NearMissMessage> &NearMissesOut,
744                         SMLoc IDLoc, OperandVector &Operands);
745   void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
746                         OperandVector &Operands);
747 
748   MCSymbolRefExpr::VariantKind
749   getVariantKindForName(StringRef Name) const override;
750 
751   void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
752 
753   void onLabelParsed(MCSymbol *Symbol) override;
754 
getInstrDesc(unsigned int Opcode) const755   const MCInstrDesc &getInstrDesc(unsigned int Opcode) const {
756     return MII.get(Opcode);
757   }
758 
hasMVE() const759   bool hasMVE() const { return getSTI().hasFeature(ARM::HasMVEIntegerOps); }
760 
761   // Return the low-subreg of a given Q register.
getDRegFromQReg(unsigned QReg) const762   unsigned getDRegFromQReg(unsigned QReg) const {
763     return MRI->getSubReg(QReg, ARM::dsub_0);
764   }
765 
getMRI() const766   const MCRegisterInfo *getMRI() const { return MRI; }
767 };
768 
769 /// ARMOperand - Instances of this class represent a parsed ARM machine
770 /// operand.
771 class ARMOperand : public MCParsedAsmOperand {
772   enum KindTy {
773     k_CondCode,
774     k_VPTPred,
775     k_CCOut,
776     k_ITCondMask,
777     k_CoprocNum,
778     k_CoprocReg,
779     k_CoprocOption,
780     k_Immediate,
781     k_MemBarrierOpt,
782     k_InstSyncBarrierOpt,
783     k_TraceSyncBarrierOpt,
784     k_Memory,
785     k_PostIndexRegister,
786     k_MSRMask,
787     k_BankedReg,
788     k_ProcIFlags,
789     k_VectorIndex,
790     k_Register,
791     k_RegisterList,
792     k_RegisterListWithAPSR,
793     k_DPRRegisterList,
794     k_SPRRegisterList,
795     k_FPSRegisterListWithVPR,
796     k_FPDRegisterListWithVPR,
797     k_VectorList,
798     k_VectorListAllLanes,
799     k_VectorListIndexed,
800     k_ShiftedRegister,
801     k_ShiftedImmediate,
802     k_ShifterImmediate,
803     k_RotateImmediate,
804     k_ModifiedImmediate,
805     k_ConstantPoolImmediate,
806     k_BitfieldDescriptor,
807     k_Token,
808   } Kind;
809 
810   SMLoc StartLoc, EndLoc, AlignmentLoc;
811   SmallVector<unsigned, 8> Registers;
812 
813   ARMAsmParser *Parser;
814 
815   struct CCOp {
816     ARMCC::CondCodes Val;
817   };
818 
819   struct VCCOp {
820     ARMVCC::VPTCodes Val;
821   };
822 
823   struct CopOp {
824     unsigned Val;
825   };
826 
827   struct CoprocOptionOp {
828     unsigned Val;
829   };
830 
831   struct ITMaskOp {
832     unsigned Mask:4;
833   };
834 
835   struct MBOptOp {
836     ARM_MB::MemBOpt Val;
837   };
838 
839   struct ISBOptOp {
840     ARM_ISB::InstSyncBOpt Val;
841   };
842 
843   struct TSBOptOp {
844     ARM_TSB::TraceSyncBOpt Val;
845   };
846 
847   struct IFlagsOp {
848     ARM_PROC::IFlags Val;
849   };
850 
851   struct MMaskOp {
852     unsigned Val;
853   };
854 
855   struct BankedRegOp {
856     unsigned Val;
857   };
858 
859   struct TokOp {
860     const char *Data;
861     unsigned Length;
862   };
863 
864   struct RegOp {
865     unsigned RegNum;
866   };
867 
868   // A vector register list is a sequential list of 1 to 4 registers.
869   struct VectorListOp {
870     unsigned RegNum;
871     unsigned Count;
872     unsigned LaneIndex;
873     bool isDoubleSpaced;
874   };
875 
876   struct VectorIndexOp {
877     unsigned Val;
878   };
879 
880   struct ImmOp {
881     const MCExpr *Val;
882   };
883 
884   /// Combined record for all forms of ARM address expressions.
885   struct MemoryOp {
886     unsigned BaseRegNum;
887     // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
888     // was specified.
889     const MCExpr *OffsetImm;  // Offset immediate value
890     unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
891     ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
892     unsigned ShiftImm;        // shift for OffsetReg.
893     unsigned Alignment;       // 0 = no alignment specified
894     // n = alignment in bytes (2, 4, 8, 16, or 32)
895     unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
896   };
897 
898   struct PostIdxRegOp {
899     unsigned RegNum;
900     bool isAdd;
901     ARM_AM::ShiftOpc ShiftTy;
902     unsigned ShiftImm;
903   };
904 
905   struct ShifterImmOp {
906     bool isASR;
907     unsigned Imm;
908   };
909 
910   struct RegShiftedRegOp {
911     ARM_AM::ShiftOpc ShiftTy;
912     unsigned SrcReg;
913     unsigned ShiftReg;
914     unsigned ShiftImm;
915   };
916 
917   struct RegShiftedImmOp {
918     ARM_AM::ShiftOpc ShiftTy;
919     unsigned SrcReg;
920     unsigned ShiftImm;
921   };
922 
923   struct RotImmOp {
924     unsigned Imm;
925   };
926 
927   struct ModImmOp {
928     unsigned Bits;
929     unsigned Rot;
930   };
931 
932   struct BitfieldOp {
933     unsigned LSB;
934     unsigned Width;
935   };
936 
937   union {
938     struct CCOp CC;
939     struct VCCOp VCC;
940     struct CopOp Cop;
941     struct CoprocOptionOp CoprocOption;
942     struct MBOptOp MBOpt;
943     struct ISBOptOp ISBOpt;
944     struct TSBOptOp TSBOpt;
945     struct ITMaskOp ITMask;
946     struct IFlagsOp IFlags;
947     struct MMaskOp MMask;
948     struct BankedRegOp BankedReg;
949     struct TokOp Tok;
950     struct RegOp Reg;
951     struct VectorListOp VectorList;
952     struct VectorIndexOp VectorIndex;
953     struct ImmOp Imm;
954     struct MemoryOp Memory;
955     struct PostIdxRegOp PostIdxReg;
956     struct ShifterImmOp ShifterImm;
957     struct RegShiftedRegOp RegShiftedReg;
958     struct RegShiftedImmOp RegShiftedImm;
959     struct RotImmOp RotImm;
960     struct ModImmOp ModImm;
961     struct BitfieldOp Bitfield;
962   };
963 
964 public:
ARMOperand(KindTy K,ARMAsmParser & Parser)965   ARMOperand(KindTy K, ARMAsmParser &Parser) : Kind(K), Parser(&Parser) {}
966 
967   /// getStartLoc - Get the location of the first token of this operand.
getStartLoc() const968   SMLoc getStartLoc() const override { return StartLoc; }
969 
970   /// getEndLoc - Get the location of the last token of this operand.
getEndLoc() const971   SMLoc getEndLoc() const override { return EndLoc; }
972 
973   /// getLocRange - Get the range between the first and last token of this
974   /// operand.
getLocRange() const975   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
976 
977   /// getAlignmentLoc - Get the location of the Alignment token of this operand.
getAlignmentLoc() const978   SMLoc getAlignmentLoc() const {
979     assert(Kind == k_Memory && "Invalid access!");
980     return AlignmentLoc;
981   }
982 
getCondCode() const983   ARMCC::CondCodes getCondCode() const {
984     assert(Kind == k_CondCode && "Invalid access!");
985     return CC.Val;
986   }
987 
getVPTPred() const988   ARMVCC::VPTCodes getVPTPred() const {
989     assert(isVPTPred() && "Invalid access!");
990     return VCC.Val;
991   }
992 
getCoproc() const993   unsigned getCoproc() const {
994     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
995     return Cop.Val;
996   }
997 
getToken() const998   StringRef getToken() const {
999     assert(Kind == k_Token && "Invalid access!");
1000     return StringRef(Tok.Data, Tok.Length);
1001   }
1002 
getReg() const1003   MCRegister getReg() const override {
1004     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
1005     return Reg.RegNum;
1006   }
1007 
getRegList() const1008   const SmallVectorImpl<unsigned> &getRegList() const {
1009     assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
1010             Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
1011             Kind == k_FPSRegisterListWithVPR ||
1012             Kind == k_FPDRegisterListWithVPR) &&
1013            "Invalid access!");
1014     return Registers;
1015   }
1016 
getImm() const1017   const MCExpr *getImm() const {
1018     assert(isImm() && "Invalid access!");
1019     return Imm.Val;
1020   }
1021 
getConstantPoolImm() const1022   const MCExpr *getConstantPoolImm() const {
1023     assert(isConstantPoolImm() && "Invalid access!");
1024     return Imm.Val;
1025   }
1026 
getVectorIndex() const1027   unsigned getVectorIndex() const {
1028     assert(Kind == k_VectorIndex && "Invalid access!");
1029     return VectorIndex.Val;
1030   }
1031 
getMemBarrierOpt() const1032   ARM_MB::MemBOpt getMemBarrierOpt() const {
1033     assert(Kind == k_MemBarrierOpt && "Invalid access!");
1034     return MBOpt.Val;
1035   }
1036 
getInstSyncBarrierOpt() const1037   ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
1038     assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
1039     return ISBOpt.Val;
1040   }
1041 
getTraceSyncBarrierOpt() const1042   ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
1043     assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
1044     return TSBOpt.Val;
1045   }
1046 
getProcIFlags() const1047   ARM_PROC::IFlags getProcIFlags() const {
1048     assert(Kind == k_ProcIFlags && "Invalid access!");
1049     return IFlags.Val;
1050   }
1051 
getMSRMask() const1052   unsigned getMSRMask() const {
1053     assert(Kind == k_MSRMask && "Invalid access!");
1054     return MMask.Val;
1055   }
1056 
getBankedReg() const1057   unsigned getBankedReg() const {
1058     assert(Kind == k_BankedReg && "Invalid access!");
1059     return BankedReg.Val;
1060   }
1061 
isCoprocNum() const1062   bool isCoprocNum() const { return Kind == k_CoprocNum; }
isCoprocReg() const1063   bool isCoprocReg() const { return Kind == k_CoprocReg; }
isCoprocOption() const1064   bool isCoprocOption() const { return Kind == k_CoprocOption; }
isCondCode() const1065   bool isCondCode() const { return Kind == k_CondCode; }
isVPTPred() const1066   bool isVPTPred() const { return Kind == k_VPTPred; }
isCCOut() const1067   bool isCCOut() const { return Kind == k_CCOut; }
isITMask() const1068   bool isITMask() const { return Kind == k_ITCondMask; }
isITCondCode() const1069   bool isITCondCode() const { return Kind == k_CondCode; }
isImm() const1070   bool isImm() const override {
1071     return Kind == k_Immediate;
1072   }
1073 
isARMBranchTarget() const1074   bool isARMBranchTarget() const {
1075     if (!isImm()) return false;
1076 
1077     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1078       return CE->getValue() % 4 == 0;
1079     return true;
1080   }
1081 
1082 
isThumbBranchTarget() const1083   bool isThumbBranchTarget() const {
1084     if (!isImm()) return false;
1085 
1086     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1087       return CE->getValue() % 2 == 0;
1088     return true;
1089   }
1090 
1091   // checks whether this operand is an unsigned offset which fits is a field
1092   // of specified width and scaled by a specific number of bits
1093   template<unsigned width, unsigned scale>
isUnsignedOffset() const1094   bool isUnsignedOffset() const {
1095     if (!isImm()) return false;
1096     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1097     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1098       int64_t Val = CE->getValue();
1099       int64_t Align = 1LL << scale;
1100       int64_t Max = Align * ((1LL << width) - 1);
1101       return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1102     }
1103     return false;
1104   }
1105 
1106   // checks whether this operand is an signed offset which fits is a field
1107   // of specified width and scaled by a specific number of bits
1108   template<unsigned width, unsigned scale>
isSignedOffset() const1109   bool isSignedOffset() const {
1110     if (!isImm()) return false;
1111     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1112     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1113       int64_t Val = CE->getValue();
1114       int64_t Align = 1LL << scale;
1115       int64_t Max = Align * ((1LL << (width-1)) - 1);
1116       int64_t Min = -Align * (1LL << (width-1));
1117       return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1118     }
1119     return false;
1120   }
1121 
1122   // checks whether this operand is an offset suitable for the LE /
1123   // LETP instructions in Arm v8.1M
isLEOffset() const1124   bool isLEOffset() const {
1125     if (!isImm()) return false;
1126     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1127     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1128       int64_t Val = CE->getValue();
1129       return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1130     }
1131     return false;
1132   }
1133 
1134   // checks whether this operand is a memory operand computed as an offset
1135   // applied to PC. the offset may have 8 bits of magnitude and is represented
1136   // with two bits of shift. textually it may be either [pc, #imm], #imm or
1137   // relocable expression...
isThumbMemPC() const1138   bool isThumbMemPC() const {
1139     int64_t Val = 0;
1140     if (isImm()) {
1141       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1142       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1143       if (!CE) return false;
1144       Val = CE->getValue();
1145     }
1146     else if (isGPRMem()) {
1147       if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
1148       if(Memory.BaseRegNum != ARM::PC) return false;
1149       if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
1150         Val = CE->getValue();
1151       else
1152         return false;
1153     }
1154     else return false;
1155     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1156   }
1157 
isFPImm() const1158   bool isFPImm() const {
1159     if (!isImm()) return false;
1160     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1161     if (!CE) return false;
1162     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1163     return Val != -1;
1164   }
1165 
1166   template<int64_t N, int64_t M>
isImmediate() const1167   bool isImmediate() const {
1168     if (!isImm()) return false;
1169     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1170     if (!CE) return false;
1171     int64_t Value = CE->getValue();
1172     return Value >= N && Value <= M;
1173   }
1174 
1175   template<int64_t N, int64_t M>
isImmediateS4() const1176   bool isImmediateS4() const {
1177     if (!isImm()) return false;
1178     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1179     if (!CE) return false;
1180     int64_t Value = CE->getValue();
1181     return ((Value & 3) == 0) && Value >= N && Value <= M;
1182   }
1183   template<int64_t N, int64_t M>
isImmediateS2() const1184   bool isImmediateS2() const {
1185     if (!isImm()) return false;
1186     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1187     if (!CE) return false;
1188     int64_t Value = CE->getValue();
1189     return ((Value & 1) == 0) && Value >= N && Value <= M;
1190   }
isFBits16() const1191   bool isFBits16() const {
1192     return isImmediate<0, 17>();
1193   }
isFBits32() const1194   bool isFBits32() const {
1195     return isImmediate<1, 33>();
1196   }
isImm8s4() const1197   bool isImm8s4() const {
1198     return isImmediateS4<-1020, 1020>();
1199   }
isImm7s4() const1200   bool isImm7s4() const {
1201     return isImmediateS4<-508, 508>();
1202   }
isImm7Shift0() const1203   bool isImm7Shift0() const {
1204     return isImmediate<-127, 127>();
1205   }
isImm7Shift1() const1206   bool isImm7Shift1() const {
1207     return isImmediateS2<-255, 255>();
1208   }
isImm7Shift2() const1209   bool isImm7Shift2() const {
1210     return isImmediateS4<-511, 511>();
1211   }
isImm7() const1212   bool isImm7() const {
1213     return isImmediate<-127, 127>();
1214   }
isImm0_1020s4() const1215   bool isImm0_1020s4() const {
1216     return isImmediateS4<0, 1020>();
1217   }
isImm0_508s4() const1218   bool isImm0_508s4() const {
1219     return isImmediateS4<0, 508>();
1220   }
isImm0_508s4Neg() const1221   bool isImm0_508s4Neg() const {
1222     if (!isImm()) return false;
1223     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1224     if (!CE) return false;
1225     int64_t Value = -CE->getValue();
1226     // explicitly exclude zero. we want that to use the normal 0_508 version.
1227     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1228   }
1229 
isImm0_4095Neg() const1230   bool isImm0_4095Neg() const {
1231     if (!isImm()) return false;
1232     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1233     if (!CE) return false;
1234     // isImm0_4095Neg is used with 32-bit immediates only.
1235     // 32-bit immediates are zero extended to 64-bit when parsed,
1236     // thus simple -CE->getValue() results in a big negative number,
1237     // not a small positive number as intended
1238     if ((CE->getValue() >> 32) > 0) return false;
1239     uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1240     return Value > 0 && Value < 4096;
1241   }
1242 
isImm0_7() const1243   bool isImm0_7() const {
1244     return isImmediate<0, 7>();
1245   }
1246 
isImm1_16() const1247   bool isImm1_16() const {
1248     return isImmediate<1, 16>();
1249   }
1250 
isImm1_32() const1251   bool isImm1_32() const {
1252     return isImmediate<1, 32>();
1253   }
1254 
isImm8_255() const1255   bool isImm8_255() const {
1256     return isImmediate<8, 255>();
1257   }
1258 
isImm0_255Expr() const1259   bool isImm0_255Expr() const {
1260     if (!isImm())
1261       return false;
1262     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1263     // If it's not a constant expression, it'll generate a fixup and be
1264     // handled later.
1265     if (!CE)
1266       return true;
1267     int64_t Value = CE->getValue();
1268     return isUInt<8>(Value);
1269   }
1270 
isImm256_65535Expr() const1271   bool isImm256_65535Expr() const {
1272     if (!isImm()) return false;
1273     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1274     // If it's not a constant expression, it'll generate a fixup and be
1275     // handled later.
1276     if (!CE) return true;
1277     int64_t Value = CE->getValue();
1278     return Value >= 256 && Value < 65536;
1279   }
1280 
isImm0_65535Expr() const1281   bool isImm0_65535Expr() const {
1282     if (!isImm()) return false;
1283     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1284     // If it's not a constant expression, it'll generate a fixup and be
1285     // handled later.
1286     if (!CE) return true;
1287     int64_t Value = CE->getValue();
1288     return Value >= 0 && Value < 65536;
1289   }
1290 
isImm24bit() const1291   bool isImm24bit() const {
1292     return isImmediate<0, 0xffffff + 1>();
1293   }
1294 
isImmThumbSR() const1295   bool isImmThumbSR() const {
1296     return isImmediate<1, 33>();
1297   }
1298 
isPKHLSLImm() const1299   bool isPKHLSLImm() const {
1300     return isImmediate<0, 32>();
1301   }
1302 
isPKHASRImm() const1303   bool isPKHASRImm() const {
1304     return isImmediate<0, 33>();
1305   }
1306 
isAdrLabel() const1307   bool isAdrLabel() const {
1308     // If we have an immediate that's not a constant, treat it as a label
1309     // reference needing a fixup.
1310     if (isImm() && !isa<MCConstantExpr>(getImm()))
1311       return true;
1312 
1313     // If it is a constant, it must fit into a modified immediate encoding.
1314     if (!isImm()) return false;
1315     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1316     if (!CE) return false;
1317     int64_t Value = CE->getValue();
1318     return (ARM_AM::getSOImmVal(Value) != -1 ||
1319             ARM_AM::getSOImmVal(-Value) != -1);
1320   }
1321 
isT2SOImm() const1322   bool isT2SOImm() const {
1323     // If we have an immediate that's not a constant, treat it as an expression
1324     // needing a fixup.
1325     if (isImm() && !isa<MCConstantExpr>(getImm())) {
1326       // We want to avoid matching :upper16: and :lower16: as we want these
1327       // expressions to match in isImm0_65535Expr()
1328       const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1329       return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1330                              ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1331     }
1332     if (!isImm()) return false;
1333     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1334     if (!CE) return false;
1335     int64_t Value = CE->getValue();
1336     return ARM_AM::getT2SOImmVal(Value) != -1;
1337   }
1338 
isT2SOImmNot() const1339   bool isT2SOImmNot() const {
1340     if (!isImm()) return false;
1341     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1342     if (!CE) return false;
1343     int64_t Value = CE->getValue();
1344     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1345       ARM_AM::getT2SOImmVal(~Value) != -1;
1346   }
1347 
isT2SOImmNeg() const1348   bool isT2SOImmNeg() const {
1349     if (!isImm()) return false;
1350     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1351     if (!CE) return false;
1352     int64_t Value = CE->getValue();
1353     // Only use this when not representable as a plain so_imm.
1354     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1355       ARM_AM::getT2SOImmVal(-Value) != -1;
1356   }
1357 
isSetEndImm() const1358   bool isSetEndImm() const {
1359     if (!isImm()) return false;
1360     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1361     if (!CE) return false;
1362     int64_t Value = CE->getValue();
1363     return Value == 1 || Value == 0;
1364   }
1365 
isReg() const1366   bool isReg() const override { return Kind == k_Register; }
isRegList() const1367   bool isRegList() const { return Kind == k_RegisterList; }
isRegListWithAPSR() const1368   bool isRegListWithAPSR() const {
1369     return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1370   }
isDReg() const1371   bool isDReg() const {
1372     return isReg() &&
1373            ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg.RegNum);
1374   }
isQReg() const1375   bool isQReg() const {
1376     return isReg() &&
1377            ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg.RegNum);
1378   }
isDPRRegList() const1379   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
isSPRRegList() const1380   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
isFPSRegListWithVPR() const1381   bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
isFPDRegListWithVPR() const1382   bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
isToken() const1383   bool isToken() const override { return Kind == k_Token; }
isMemBarrierOpt() const1384   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
isInstSyncBarrierOpt() const1385   bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
isTraceSyncBarrierOpt() const1386   bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
isMem() const1387   bool isMem() const override {
1388       return isGPRMem() || isMVEMem();
1389   }
isMVEMem() const1390   bool isMVEMem() const {
1391     if (Kind != k_Memory)
1392       return false;
1393     if (Memory.BaseRegNum &&
1394         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
1395         !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
1396       return false;
1397     if (Memory.OffsetRegNum &&
1398         !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1399             Memory.OffsetRegNum))
1400       return false;
1401     return true;
1402   }
isGPRMem() const1403   bool isGPRMem() const {
1404     if (Kind != k_Memory)
1405       return false;
1406     if (Memory.BaseRegNum &&
1407         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1408       return false;
1409     if (Memory.OffsetRegNum &&
1410         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1411       return false;
1412     return true;
1413   }
isShifterImm() const1414   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
isRegShiftedReg() const1415   bool isRegShiftedReg() const {
1416     return Kind == k_ShiftedRegister &&
1417            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1418                RegShiftedReg.SrcReg) &&
1419            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1420                RegShiftedReg.ShiftReg);
1421   }
isRegShiftedImm() const1422   bool isRegShiftedImm() const {
1423     return Kind == k_ShiftedImmediate &&
1424            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1425                RegShiftedImm.SrcReg);
1426   }
isRotImm() const1427   bool isRotImm() const { return Kind == k_RotateImmediate; }
1428 
1429   template<unsigned Min, unsigned Max>
isPowerTwoInRange() const1430   bool isPowerTwoInRange() const {
1431     if (!isImm()) return false;
1432     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1433     if (!CE) return false;
1434     int64_t Value = CE->getValue();
1435     return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
1436            Value <= Max;
1437   }
isModImm() const1438   bool isModImm() const { return Kind == k_ModifiedImmediate; }
1439 
isModImmNot() const1440   bool isModImmNot() const {
1441     if (!isImm()) return false;
1442     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1443     if (!CE) return false;
1444     int64_t Value = CE->getValue();
1445     return ARM_AM::getSOImmVal(~Value) != -1;
1446   }
1447 
isModImmNeg() const1448   bool isModImmNeg() const {
1449     if (!isImm()) return false;
1450     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1451     if (!CE) return false;
1452     int64_t Value = CE->getValue();
1453     return ARM_AM::getSOImmVal(Value) == -1 &&
1454       ARM_AM::getSOImmVal(-Value) != -1;
1455   }
1456 
isThumbModImmNeg1_7() const1457   bool isThumbModImmNeg1_7() const {
1458     if (!isImm()) return false;
1459     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1460     if (!CE) return false;
1461     int32_t Value = -(int32_t)CE->getValue();
1462     return 0 < Value && Value < 8;
1463   }
1464 
isThumbModImmNeg8_255() const1465   bool isThumbModImmNeg8_255() const {
1466     if (!isImm()) return false;
1467     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1468     if (!CE) return false;
1469     int32_t Value = -(int32_t)CE->getValue();
1470     return 7 < Value && Value < 256;
1471   }
1472 
isConstantPoolImm() const1473   bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
isBitfield() const1474   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
isPostIdxRegShifted() const1475   bool isPostIdxRegShifted() const {
1476     return Kind == k_PostIndexRegister &&
1477            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1478   }
isPostIdxReg() const1479   bool isPostIdxReg() const {
1480     return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1481   }
isMemNoOffset(bool alignOK=false,unsigned Alignment=0) const1482   bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1483     if (!isGPRMem())
1484       return false;
1485     // No offset of any kind.
1486     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1487      (alignOK || Memory.Alignment == Alignment);
1488   }
isMemNoOffsetT2(bool alignOK=false,unsigned Alignment=0) const1489   bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1490     if (!isGPRMem())
1491       return false;
1492 
1493     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1494             Memory.BaseRegNum))
1495       return false;
1496 
1497     // No offset of any kind.
1498     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1499      (alignOK || Memory.Alignment == Alignment);
1500   }
isMemNoOffsetT2NoSp(bool alignOK=false,unsigned Alignment=0) const1501   bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1502     if (!isGPRMem())
1503       return false;
1504 
1505     if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1506             Memory.BaseRegNum))
1507       return false;
1508 
1509     // No offset of any kind.
1510     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1511      (alignOK || Memory.Alignment == Alignment);
1512   }
isMemNoOffsetT(bool alignOK=false,unsigned Alignment=0) const1513   bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
1514     if (!isGPRMem())
1515       return false;
1516 
1517     if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
1518             Memory.BaseRegNum))
1519       return false;
1520 
1521     // No offset of any kind.
1522     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1523      (alignOK || Memory.Alignment == Alignment);
1524   }
isMemPCRelImm12() const1525   bool isMemPCRelImm12() const {
1526     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1527       return false;
1528     // Base register must be PC.
1529     if (Memory.BaseRegNum != ARM::PC)
1530       return false;
1531     // Immediate offset in range [-4095, 4095].
1532     if (!Memory.OffsetImm) return true;
1533     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1534       int64_t Val = CE->getValue();
1535       return (Val > -4096 && Val < 4096) ||
1536              (Val == std::numeric_limits<int32_t>::min());
1537     }
1538     return false;
1539   }
1540 
isAlignedMemory() const1541   bool isAlignedMemory() const {
1542     return isMemNoOffset(true);
1543   }
1544 
isAlignedMemoryNone() const1545   bool isAlignedMemoryNone() const {
1546     return isMemNoOffset(false, 0);
1547   }
1548 
isDupAlignedMemoryNone() const1549   bool isDupAlignedMemoryNone() const {
1550     return isMemNoOffset(false, 0);
1551   }
1552 
isAlignedMemory16() const1553   bool isAlignedMemory16() const {
1554     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1555       return true;
1556     return isMemNoOffset(false, 0);
1557   }
1558 
isDupAlignedMemory16() const1559   bool isDupAlignedMemory16() const {
1560     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1561       return true;
1562     return isMemNoOffset(false, 0);
1563   }
1564 
isAlignedMemory32() const1565   bool isAlignedMemory32() const {
1566     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1567       return true;
1568     return isMemNoOffset(false, 0);
1569   }
1570 
isDupAlignedMemory32() const1571   bool isDupAlignedMemory32() const {
1572     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1573       return true;
1574     return isMemNoOffset(false, 0);
1575   }
1576 
isAlignedMemory64() const1577   bool isAlignedMemory64() const {
1578     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1579       return true;
1580     return isMemNoOffset(false, 0);
1581   }
1582 
isDupAlignedMemory64() const1583   bool isDupAlignedMemory64() const {
1584     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1585       return true;
1586     return isMemNoOffset(false, 0);
1587   }
1588 
isAlignedMemory64or128() const1589   bool isAlignedMemory64or128() const {
1590     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1591       return true;
1592     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1593       return true;
1594     return isMemNoOffset(false, 0);
1595   }
1596 
isDupAlignedMemory64or128() const1597   bool isDupAlignedMemory64or128() const {
1598     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1599       return true;
1600     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1601       return true;
1602     return isMemNoOffset(false, 0);
1603   }
1604 
isAlignedMemory64or128or256() const1605   bool isAlignedMemory64or128or256() const {
1606     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1607       return true;
1608     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1609       return true;
1610     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1611       return true;
1612     return isMemNoOffset(false, 0);
1613   }
1614 
isAddrMode2() const1615   bool isAddrMode2() const {
1616     if (!isGPRMem() || Memory.Alignment != 0) return false;
1617     // Check for register offset.
1618     if (Memory.OffsetRegNum) return true;
1619     // Immediate offset in range [-4095, 4095].
1620     if (!Memory.OffsetImm) return true;
1621     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1622       int64_t Val = CE->getValue();
1623       return Val > -4096 && Val < 4096;
1624     }
1625     return false;
1626   }
1627 
isAM2OffsetImm() const1628   bool isAM2OffsetImm() const {
1629     if (!isImm()) return false;
1630     // Immediate offset in range [-4095, 4095].
1631     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1632     if (!CE) return false;
1633     int64_t Val = CE->getValue();
1634     return (Val == std::numeric_limits<int32_t>::min()) ||
1635            (Val > -4096 && Val < 4096);
1636   }
1637 
isAddrMode3() const1638   bool isAddrMode3() const {
1639     // If we have an immediate that's not a constant, treat it as a label
1640     // reference needing a fixup. If it is a constant, it's something else
1641     // and we reject it.
1642     if (isImm() && !isa<MCConstantExpr>(getImm()))
1643       return true;
1644     if (!isGPRMem() || Memory.Alignment != 0) return false;
1645     // No shifts are legal for AM3.
1646     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1647     // Check for register offset.
1648     if (Memory.OffsetRegNum) return true;
1649     // Immediate offset in range [-255, 255].
1650     if (!Memory.OffsetImm) return true;
1651     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1652       int64_t Val = CE->getValue();
1653       // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1654       // we have to check for this too.
1655       return (Val > -256 && Val < 256) ||
1656              Val == std::numeric_limits<int32_t>::min();
1657     }
1658     return false;
1659   }
1660 
isAM3Offset() const1661   bool isAM3Offset() const {
1662     if (isPostIdxReg())
1663       return true;
1664     if (!isImm())
1665       return false;
1666     // Immediate offset in range [-255, 255].
1667     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1668     if (!CE) return false;
1669     int64_t Val = CE->getValue();
1670     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1671     return (Val > -256 && Val < 256) ||
1672            Val == std::numeric_limits<int32_t>::min();
1673   }
1674 
isAddrMode5() const1675   bool isAddrMode5() const {
1676     // If we have an immediate that's not a constant, treat it as a label
1677     // reference needing a fixup. If it is a constant, it's something else
1678     // and we reject it.
1679     if (isImm() && !isa<MCConstantExpr>(getImm()))
1680       return true;
1681     if (!isGPRMem() || Memory.Alignment != 0) return false;
1682     // Check for register offset.
1683     if (Memory.OffsetRegNum) return false;
1684     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1685     if (!Memory.OffsetImm) return true;
1686     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1687       int64_t Val = CE->getValue();
1688       return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1689              Val == std::numeric_limits<int32_t>::min();
1690     }
1691     return false;
1692   }
1693 
isAddrMode5FP16() const1694   bool isAddrMode5FP16() const {
1695     // If we have an immediate that's not a constant, treat it as a label
1696     // reference needing a fixup. If it is a constant, it's something else
1697     // and we reject it.
1698     if (isImm() && !isa<MCConstantExpr>(getImm()))
1699       return true;
1700     if (!isGPRMem() || Memory.Alignment != 0) return false;
1701     // Check for register offset.
1702     if (Memory.OffsetRegNum) return false;
1703     // Immediate offset in range [-510, 510] and a multiple of 2.
1704     if (!Memory.OffsetImm) return true;
1705     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1706       int64_t Val = CE->getValue();
1707       return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1708              Val == std::numeric_limits<int32_t>::min();
1709     }
1710     return false;
1711   }
1712 
isMemTBB() const1713   bool isMemTBB() const {
1714     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1715         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1716       return false;
1717     return true;
1718   }
1719 
isMemTBH() const1720   bool isMemTBH() const {
1721     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1722         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1723         Memory.Alignment != 0 )
1724       return false;
1725     return true;
1726   }
1727 
isMemRegOffset() const1728   bool isMemRegOffset() const {
1729     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1730       return false;
1731     return true;
1732   }
1733 
isT2MemRegOffset() const1734   bool isT2MemRegOffset() const {
1735     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1736         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1737       return false;
1738     // Only lsl #{0, 1, 2, 3} allowed.
1739     if (Memory.ShiftType == ARM_AM::no_shift)
1740       return true;
1741     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1742       return false;
1743     return true;
1744   }
1745 
isMemThumbRR() const1746   bool isMemThumbRR() const {
1747     // Thumb reg+reg addressing is simple. Just two registers, a base and
1748     // an offset. No shifts, negations or any other complicating factors.
1749     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1750         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1751       return false;
1752     return isARMLowRegister(Memory.BaseRegNum) &&
1753       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1754   }
1755 
isMemThumbRIs4() const1756   bool isMemThumbRIs4() const {
1757     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1758         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1759       return false;
1760     // Immediate offset, multiple of 4 in range [0, 124].
1761     if (!Memory.OffsetImm) return true;
1762     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1763       int64_t Val = CE->getValue();
1764       return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1765     }
1766     return false;
1767   }
1768 
isMemThumbRIs2() const1769   bool isMemThumbRIs2() const {
1770     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1771         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1772       return false;
1773     // Immediate offset, multiple of 4 in range [0, 62].
1774     if (!Memory.OffsetImm) return true;
1775     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1776       int64_t Val = CE->getValue();
1777       return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1778     }
1779     return false;
1780   }
1781 
isMemThumbRIs1() const1782   bool isMemThumbRIs1() const {
1783     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1784         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1785       return false;
1786     // Immediate offset in range [0, 31].
1787     if (!Memory.OffsetImm) return true;
1788     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1789       int64_t Val = CE->getValue();
1790       return Val >= 0 && Val <= 31;
1791     }
1792     return false;
1793   }
1794 
isMemThumbSPI() const1795   bool isMemThumbSPI() const {
1796     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1797         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1798       return false;
1799     // Immediate offset, multiple of 4 in range [0, 1020].
1800     if (!Memory.OffsetImm) return true;
1801     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1802       int64_t Val = CE->getValue();
1803       return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1804     }
1805     return false;
1806   }
1807 
isMemImm8s4Offset() const1808   bool isMemImm8s4Offset() const {
1809     // If we have an immediate that's not a constant, treat it as a label
1810     // reference needing a fixup. If it is a constant, it's something else
1811     // and we reject it.
1812     if (isImm() && !isa<MCConstantExpr>(getImm()))
1813       return true;
1814     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1815       return false;
1816     // Immediate offset a multiple of 4 in range [-1020, 1020].
1817     if (!Memory.OffsetImm) return true;
1818     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1819       int64_t Val = CE->getValue();
1820       // Special case, #-0 is std::numeric_limits<int32_t>::min().
1821       return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1822              Val == std::numeric_limits<int32_t>::min();
1823     }
1824     return false;
1825   }
1826 
isMemImm7s4Offset() const1827   bool isMemImm7s4Offset() const {
1828     // If we have an immediate that's not a constant, treat it as a label
1829     // reference needing a fixup. If it is a constant, it's something else
1830     // and we reject it.
1831     if (isImm() && !isa<MCConstantExpr>(getImm()))
1832       return true;
1833     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1834         !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1835             Memory.BaseRegNum))
1836       return false;
1837     // Immediate offset a multiple of 4 in range [-508, 508].
1838     if (!Memory.OffsetImm) return true;
1839     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1840       int64_t Val = CE->getValue();
1841       // Special case, #-0 is INT32_MIN.
1842       return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1843     }
1844     return false;
1845   }
1846 
isMemImm0_1020s4Offset() const1847   bool isMemImm0_1020s4Offset() const {
1848     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1849       return false;
1850     // Immediate offset a multiple of 4 in range [0, 1020].
1851     if (!Memory.OffsetImm) return true;
1852     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1853       int64_t Val = CE->getValue();
1854       return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1855     }
1856     return false;
1857   }
1858 
isMemImm8Offset() const1859   bool isMemImm8Offset() const {
1860     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1861       return false;
1862     // Base reg of PC isn't allowed for these encodings.
1863     if (Memory.BaseRegNum == ARM::PC) return false;
1864     // Immediate offset in range [-255, 255].
1865     if (!Memory.OffsetImm) return true;
1866     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1867       int64_t Val = CE->getValue();
1868       return (Val == std::numeric_limits<int32_t>::min()) ||
1869              (Val > -256 && Val < 256);
1870     }
1871     return false;
1872   }
1873 
1874   template<unsigned Bits, unsigned RegClassID>
isMemImm7ShiftedOffset() const1875   bool isMemImm7ShiftedOffset() const {
1876     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1877         !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
1878       return false;
1879 
1880     // Expect an immediate offset equal to an element of the range
1881     // [-127, 127], shifted left by Bits.
1882 
1883     if (!Memory.OffsetImm) return true;
1884     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1885       int64_t Val = CE->getValue();
1886 
1887       // INT32_MIN is a special-case value (indicating the encoding with
1888       // zero offset and the subtract bit set)
1889       if (Val == INT32_MIN)
1890         return true;
1891 
1892       unsigned Divisor = 1U << Bits;
1893 
1894       // Check that the low bits are zero
1895       if (Val % Divisor != 0)
1896         return false;
1897 
1898       // Check that the remaining offset is within range.
1899       Val /= Divisor;
1900       return (Val >= -127 && Val <= 127);
1901     }
1902     return false;
1903   }
1904 
isMemRegRQOffset() const1905   template <int shift> bool isMemRegRQOffset() const {
1906     if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
1907       return false;
1908 
1909     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1910             Memory.BaseRegNum))
1911       return false;
1912     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1913             Memory.OffsetRegNum))
1914       return false;
1915 
1916     if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
1917       return false;
1918 
1919     if (shift > 0 &&
1920         (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
1921       return false;
1922 
1923     return true;
1924   }
1925 
isMemRegQOffset() const1926   template <int shift> bool isMemRegQOffset() const {
1927     if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1928       return false;
1929 
1930     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1931             Memory.BaseRegNum))
1932       return false;
1933 
1934     if (!Memory.OffsetImm)
1935       return true;
1936     static_assert(shift < 56,
1937                   "Such that we dont shift by a value higher than 62");
1938     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1939       int64_t Val = CE->getValue();
1940 
1941       // The value must be a multiple of (1 << shift)
1942       if ((Val & ((1U << shift) - 1)) != 0)
1943         return false;
1944 
1945       // And be in the right range, depending on the amount that it is shifted
1946       // by.  Shift 0, is equal to 7 unsigned bits, the sign bit is set
1947       // separately.
1948       int64_t Range = (1U << (7 + shift)) - 1;
1949       return (Val == INT32_MIN) || (Val > -Range && Val < Range);
1950     }
1951     return false;
1952   }
1953 
isMemPosImm8Offset() const1954   bool isMemPosImm8Offset() const {
1955     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1956       return false;
1957     // Immediate offset in range [0, 255].
1958     if (!Memory.OffsetImm) return true;
1959     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1960       int64_t Val = CE->getValue();
1961       return Val >= 0 && Val < 256;
1962     }
1963     return false;
1964   }
1965 
isMemNegImm8Offset() const1966   bool isMemNegImm8Offset() const {
1967     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1968       return false;
1969     // Base reg of PC isn't allowed for these encodings.
1970     if (Memory.BaseRegNum == ARM::PC) return false;
1971     // Immediate offset in range [-255, -1].
1972     if (!Memory.OffsetImm) return false;
1973     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1974       int64_t Val = CE->getValue();
1975       return (Val == std::numeric_limits<int32_t>::min()) ||
1976              (Val > -256 && Val < 0);
1977     }
1978     return false;
1979   }
1980 
isMemUImm12Offset() const1981   bool isMemUImm12Offset() const {
1982     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1983       return false;
1984     // Immediate offset in range [0, 4095].
1985     if (!Memory.OffsetImm) return true;
1986     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1987       int64_t Val = CE->getValue();
1988       return (Val >= 0 && Val < 4096);
1989     }
1990     return false;
1991   }
1992 
isMemImm12Offset() const1993   bool isMemImm12Offset() const {
1994     // If we have an immediate that's not a constant, treat it as a label
1995     // reference needing a fixup. If it is a constant, it's something else
1996     // and we reject it.
1997 
1998     if (isImm() && !isa<MCConstantExpr>(getImm()))
1999       return true;
2000 
2001     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
2002       return false;
2003     // Immediate offset in range [-4095, 4095].
2004     if (!Memory.OffsetImm) return true;
2005     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
2006       int64_t Val = CE->getValue();
2007       return (Val > -4096 && Val < 4096) ||
2008              (Val == std::numeric_limits<int32_t>::min());
2009     }
2010     // If we have an immediate that's not a constant, treat it as a
2011     // symbolic expression needing a fixup.
2012     return true;
2013   }
2014 
isConstPoolAsmImm() const2015   bool isConstPoolAsmImm() const {
2016     // Delay processing of Constant Pool Immediate, this will turn into
2017     // a constant. Match no other operand
2018     return (isConstantPoolImm());
2019   }
2020 
isPostIdxImm8() const2021   bool isPostIdxImm8() const {
2022     if (!isImm()) return false;
2023     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2024     if (!CE) return false;
2025     int64_t Val = CE->getValue();
2026     return (Val > -256 && Val < 256) ||
2027            (Val == std::numeric_limits<int32_t>::min());
2028   }
2029 
isPostIdxImm8s4() const2030   bool isPostIdxImm8s4() const {
2031     if (!isImm()) return false;
2032     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2033     if (!CE) return false;
2034     int64_t Val = CE->getValue();
2035     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2036            (Val == std::numeric_limits<int32_t>::min());
2037   }
2038 
isMSRMask() const2039   bool isMSRMask() const { return Kind == k_MSRMask; }
isBankedReg() const2040   bool isBankedReg() const { return Kind == k_BankedReg; }
isProcIFlags() const2041   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2042 
2043   // NEON operands.
isAnyVectorList() const2044   bool isAnyVectorList() const {
2045     return Kind == k_VectorList || Kind == k_VectorListAllLanes ||
2046            Kind == k_VectorListIndexed;
2047   }
2048 
isVectorList() const2049   bool isVectorList() const { return Kind == k_VectorList; }
2050 
isSingleSpacedVectorList() const2051   bool isSingleSpacedVectorList() const {
2052     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2053   }
2054 
isDoubleSpacedVectorList() const2055   bool isDoubleSpacedVectorList() const {
2056     return Kind == k_VectorList && VectorList.isDoubleSpaced;
2057   }
2058 
isVecListOneD() const2059   bool isVecListOneD() const {
2060     // We convert a single D reg to a list containing a D reg
2061     if (isDReg() && !Parser->hasMVE())
2062       return true;
2063     if (!isSingleSpacedVectorList()) return false;
2064     return VectorList.Count == 1;
2065   }
2066 
isVecListTwoMQ() const2067   bool isVecListTwoMQ() const {
2068     return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2069            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2070                VectorList.RegNum);
2071   }
2072 
isVecListDPair() const2073   bool isVecListDPair() const {
2074     // We convert a single Q reg to a list with the two corresponding D
2075     // registers
2076     if (isQReg() && !Parser->hasMVE())
2077       return true;
2078     if (!isSingleSpacedVectorList()) return false;
2079     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2080               .contains(VectorList.RegNum));
2081   }
2082 
isVecListThreeD() const2083   bool isVecListThreeD() const {
2084     if (!isSingleSpacedVectorList()) return false;
2085     return VectorList.Count == 3;
2086   }
2087 
isVecListFourD() const2088   bool isVecListFourD() const {
2089     if (!isSingleSpacedVectorList()) return false;
2090     return VectorList.Count == 4;
2091   }
2092 
isVecListDPairSpaced() const2093   bool isVecListDPairSpaced() const {
2094     if (Kind != k_VectorList) return false;
2095     if (isSingleSpacedVectorList()) return false;
2096     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2097               .contains(VectorList.RegNum));
2098   }
2099 
isVecListThreeQ() const2100   bool isVecListThreeQ() const {
2101     if (!isDoubleSpacedVectorList()) return false;
2102     return VectorList.Count == 3;
2103   }
2104 
isVecListFourQ() const2105   bool isVecListFourQ() const {
2106     if (!isDoubleSpacedVectorList()) return false;
2107     return VectorList.Count == 4;
2108   }
2109 
isVecListFourMQ() const2110   bool isVecListFourMQ() const {
2111     return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2112            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2113                VectorList.RegNum);
2114   }
2115 
isSingleSpacedVectorAllLanes() const2116   bool isSingleSpacedVectorAllLanes() const {
2117     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2118   }
2119 
isDoubleSpacedVectorAllLanes() const2120   bool isDoubleSpacedVectorAllLanes() const {
2121     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2122   }
2123 
isVecListOneDAllLanes() const2124   bool isVecListOneDAllLanes() const {
2125     if (!isSingleSpacedVectorAllLanes()) return false;
2126     return VectorList.Count == 1;
2127   }
2128 
isVecListDPairAllLanes() const2129   bool isVecListDPairAllLanes() const {
2130     if (!isSingleSpacedVectorAllLanes()) return false;
2131     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2132               .contains(VectorList.RegNum));
2133   }
2134 
isVecListDPairSpacedAllLanes() const2135   bool isVecListDPairSpacedAllLanes() const {
2136     if (!isDoubleSpacedVectorAllLanes()) return false;
2137     return VectorList.Count == 2;
2138   }
2139 
isVecListThreeDAllLanes() const2140   bool isVecListThreeDAllLanes() const {
2141     if (!isSingleSpacedVectorAllLanes()) return false;
2142     return VectorList.Count == 3;
2143   }
2144 
isVecListThreeQAllLanes() const2145   bool isVecListThreeQAllLanes() const {
2146     if (!isDoubleSpacedVectorAllLanes()) return false;
2147     return VectorList.Count == 3;
2148   }
2149 
isVecListFourDAllLanes() const2150   bool isVecListFourDAllLanes() const {
2151     if (!isSingleSpacedVectorAllLanes()) return false;
2152     return VectorList.Count == 4;
2153   }
2154 
isVecListFourQAllLanes() const2155   bool isVecListFourQAllLanes() const {
2156     if (!isDoubleSpacedVectorAllLanes()) return false;
2157     return VectorList.Count == 4;
2158   }
2159 
isSingleSpacedVectorIndexed() const2160   bool isSingleSpacedVectorIndexed() const {
2161     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2162   }
2163 
isDoubleSpacedVectorIndexed() const2164   bool isDoubleSpacedVectorIndexed() const {
2165     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2166   }
2167 
isVecListOneDByteIndexed() const2168   bool isVecListOneDByteIndexed() const {
2169     if (!isSingleSpacedVectorIndexed()) return false;
2170     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2171   }
2172 
isVecListOneDHWordIndexed() const2173   bool isVecListOneDHWordIndexed() const {
2174     if (!isSingleSpacedVectorIndexed()) return false;
2175     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2176   }
2177 
isVecListOneDWordIndexed() const2178   bool isVecListOneDWordIndexed() const {
2179     if (!isSingleSpacedVectorIndexed()) return false;
2180     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2181   }
2182 
isVecListTwoDByteIndexed() const2183   bool isVecListTwoDByteIndexed() const {
2184     if (!isSingleSpacedVectorIndexed()) return false;
2185     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2186   }
2187 
isVecListTwoDHWordIndexed() const2188   bool isVecListTwoDHWordIndexed() const {
2189     if (!isSingleSpacedVectorIndexed()) return false;
2190     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2191   }
2192 
isVecListTwoQWordIndexed() const2193   bool isVecListTwoQWordIndexed() const {
2194     if (!isDoubleSpacedVectorIndexed()) return false;
2195     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2196   }
2197 
isVecListTwoQHWordIndexed() const2198   bool isVecListTwoQHWordIndexed() const {
2199     if (!isDoubleSpacedVectorIndexed()) return false;
2200     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2201   }
2202 
isVecListTwoDWordIndexed() const2203   bool isVecListTwoDWordIndexed() const {
2204     if (!isSingleSpacedVectorIndexed()) return false;
2205     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2206   }
2207 
isVecListThreeDByteIndexed() const2208   bool isVecListThreeDByteIndexed() const {
2209     if (!isSingleSpacedVectorIndexed()) return false;
2210     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2211   }
2212 
isVecListThreeDHWordIndexed() const2213   bool isVecListThreeDHWordIndexed() const {
2214     if (!isSingleSpacedVectorIndexed()) return false;
2215     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2216   }
2217 
isVecListThreeQWordIndexed() const2218   bool isVecListThreeQWordIndexed() const {
2219     if (!isDoubleSpacedVectorIndexed()) return false;
2220     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2221   }
2222 
isVecListThreeQHWordIndexed() const2223   bool isVecListThreeQHWordIndexed() const {
2224     if (!isDoubleSpacedVectorIndexed()) return false;
2225     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2226   }
2227 
isVecListThreeDWordIndexed() const2228   bool isVecListThreeDWordIndexed() const {
2229     if (!isSingleSpacedVectorIndexed()) return false;
2230     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2231   }
2232 
isVecListFourDByteIndexed() const2233   bool isVecListFourDByteIndexed() const {
2234     if (!isSingleSpacedVectorIndexed()) return false;
2235     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2236   }
2237 
isVecListFourDHWordIndexed() const2238   bool isVecListFourDHWordIndexed() const {
2239     if (!isSingleSpacedVectorIndexed()) return false;
2240     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2241   }
2242 
isVecListFourQWordIndexed() const2243   bool isVecListFourQWordIndexed() const {
2244     if (!isDoubleSpacedVectorIndexed()) return false;
2245     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2246   }
2247 
isVecListFourQHWordIndexed() const2248   bool isVecListFourQHWordIndexed() const {
2249     if (!isDoubleSpacedVectorIndexed()) return false;
2250     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2251   }
2252 
isVecListFourDWordIndexed() const2253   bool isVecListFourDWordIndexed() const {
2254     if (!isSingleSpacedVectorIndexed()) return false;
2255     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2256   }
2257 
isVectorIndex() const2258   bool isVectorIndex() const { return Kind == k_VectorIndex; }
2259 
2260   template <unsigned NumLanes>
isVectorIndexInRange() const2261   bool isVectorIndexInRange() const {
2262     if (Kind != k_VectorIndex) return false;
2263     return VectorIndex.Val < NumLanes;
2264   }
2265 
isVectorIndex8() const2266   bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
isVectorIndex16() const2267   bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
isVectorIndex32() const2268   bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
isVectorIndex64() const2269   bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2270 
2271   template<int PermittedValue, int OtherPermittedValue>
isMVEPairVectorIndex() const2272   bool isMVEPairVectorIndex() const {
2273     if (Kind != k_VectorIndex) return false;
2274     return VectorIndex.Val == PermittedValue ||
2275            VectorIndex.Val == OtherPermittedValue;
2276   }
2277 
isNEONi8splat() const2278   bool isNEONi8splat() const {
2279     if (!isImm()) return false;
2280     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2281     // Must be a constant.
2282     if (!CE) return false;
2283     int64_t Value = CE->getValue();
2284     // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2285     // value.
2286     return Value >= 0 && Value < 256;
2287   }
2288 
isNEONi16splat() const2289   bool isNEONi16splat() const {
2290     if (isNEONByteReplicate(2))
2291       return false; // Leave that for bytes replication and forbid by default.
2292     if (!isImm())
2293       return false;
2294     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2295     // Must be a constant.
2296     if (!CE) return false;
2297     unsigned Value = CE->getValue();
2298     return ARM_AM::isNEONi16splat(Value);
2299   }
2300 
isNEONi16splatNot() const2301   bool isNEONi16splatNot() const {
2302     if (!isImm())
2303       return false;
2304     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2305     // Must be a constant.
2306     if (!CE) return false;
2307     unsigned Value = CE->getValue();
2308     return ARM_AM::isNEONi16splat(~Value & 0xffff);
2309   }
2310 
isNEONi32splat() const2311   bool isNEONi32splat() const {
2312     if (isNEONByteReplicate(4))
2313       return false; // Leave that for bytes replication and forbid by default.
2314     if (!isImm())
2315       return false;
2316     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2317     // Must be a constant.
2318     if (!CE) return false;
2319     unsigned Value = CE->getValue();
2320     return ARM_AM::isNEONi32splat(Value);
2321   }
2322 
isNEONi32splatNot() const2323   bool isNEONi32splatNot() const {
2324     if (!isImm())
2325       return false;
2326     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2327     // Must be a constant.
2328     if (!CE) return false;
2329     unsigned Value = CE->getValue();
2330     return ARM_AM::isNEONi32splat(~Value);
2331   }
2332 
isValidNEONi32vmovImm(int64_t Value)2333   static bool isValidNEONi32vmovImm(int64_t Value) {
2334     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2335     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2336     return ((Value & 0xffffffffffffff00) == 0) ||
2337            ((Value & 0xffffffffffff00ff) == 0) ||
2338            ((Value & 0xffffffffff00ffff) == 0) ||
2339            ((Value & 0xffffffff00ffffff) == 0) ||
2340            ((Value & 0xffffffffffff00ff) == 0xff) ||
2341            ((Value & 0xffffffffff00ffff) == 0xffff);
2342   }
2343 
isNEONReplicate(unsigned Width,unsigned NumElems,bool Inv) const2344   bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2345     assert((Width == 8 || Width == 16 || Width == 32) &&
2346            "Invalid element width");
2347     assert(NumElems * Width <= 64 && "Invalid result width");
2348 
2349     if (!isImm())
2350       return false;
2351     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2352     // Must be a constant.
2353     if (!CE)
2354       return false;
2355     int64_t Value = CE->getValue();
2356     if (!Value)
2357       return false; // Don't bother with zero.
2358     if (Inv)
2359       Value = ~Value;
2360 
2361     uint64_t Mask = (1ull << Width) - 1;
2362     uint64_t Elem = Value & Mask;
2363     if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2364       return false;
2365     if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2366       return false;
2367 
2368     for (unsigned i = 1; i < NumElems; ++i) {
2369       Value >>= Width;
2370       if ((Value & Mask) != Elem)
2371         return false;
2372     }
2373     return true;
2374   }
2375 
isNEONByteReplicate(unsigned NumBytes) const2376   bool isNEONByteReplicate(unsigned NumBytes) const {
2377     return isNEONReplicate(8, NumBytes, false);
2378   }
2379 
checkNeonReplicateArgs(unsigned FromW,unsigned ToW)2380   static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2381     assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2382            "Invalid source width");
2383     assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2384            "Invalid destination width");
2385     assert(FromW < ToW && "ToW is not less than FromW");
2386   }
2387 
2388   template<unsigned FromW, unsigned ToW>
isNEONmovReplicate() const2389   bool isNEONmovReplicate() const {
2390     checkNeonReplicateArgs(FromW, ToW);
2391     if (ToW == 64 && isNEONi64splat())
2392       return false;
2393     return isNEONReplicate(FromW, ToW / FromW, false);
2394   }
2395 
2396   template<unsigned FromW, unsigned ToW>
isNEONinvReplicate() const2397   bool isNEONinvReplicate() const {
2398     checkNeonReplicateArgs(FromW, ToW);
2399     return isNEONReplicate(FromW, ToW / FromW, true);
2400   }
2401 
isNEONi32vmov() const2402   bool isNEONi32vmov() const {
2403     if (isNEONByteReplicate(4))
2404       return false; // Let it to be classified as byte-replicate case.
2405     if (!isImm())
2406       return false;
2407     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2408     // Must be a constant.
2409     if (!CE)
2410       return false;
2411     return isValidNEONi32vmovImm(CE->getValue());
2412   }
2413 
isNEONi32vmovNeg() const2414   bool isNEONi32vmovNeg() const {
2415     if (!isImm()) return false;
2416     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2417     // Must be a constant.
2418     if (!CE) return false;
2419     return isValidNEONi32vmovImm(~CE->getValue());
2420   }
2421 
isNEONi64splat() const2422   bool isNEONi64splat() const {
2423     if (!isImm()) return false;
2424     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2425     // Must be a constant.
2426     if (!CE) return false;
2427     uint64_t Value = CE->getValue();
2428     // i64 value with each byte being either 0 or 0xff.
2429     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2430       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2431     return true;
2432   }
2433 
2434   template<int64_t Angle, int64_t Remainder>
isComplexRotation() const2435   bool isComplexRotation() const {
2436     if (!isImm()) return false;
2437 
2438     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2439     if (!CE) return false;
2440     uint64_t Value = CE->getValue();
2441 
2442     return (Value % Angle == Remainder && Value <= 270);
2443   }
2444 
isMVELongShift() const2445   bool isMVELongShift() const {
2446     if (!isImm()) return false;
2447     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2448     // Must be a constant.
2449     if (!CE) return false;
2450     uint64_t Value = CE->getValue();
2451     return Value >= 1 && Value <= 32;
2452   }
2453 
isMveSaturateOp() const2454   bool isMveSaturateOp() const {
2455     if (!isImm()) return false;
2456     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2457     if (!CE) return false;
2458     uint64_t Value = CE->getValue();
2459     return Value == 48 || Value == 64;
2460   }
2461 
isITCondCodeNoAL() const2462   bool isITCondCodeNoAL() const {
2463     if (!isITCondCode()) return false;
2464     ARMCC::CondCodes CC = getCondCode();
2465     return CC != ARMCC::AL;
2466   }
2467 
isITCondCodeRestrictedI() const2468   bool isITCondCodeRestrictedI() const {
2469     if (!isITCondCode())
2470       return false;
2471     ARMCC::CondCodes CC = getCondCode();
2472     return CC == ARMCC::EQ || CC == ARMCC::NE;
2473   }
2474 
isITCondCodeRestrictedS() const2475   bool isITCondCodeRestrictedS() const {
2476     if (!isITCondCode())
2477       return false;
2478     ARMCC::CondCodes CC = getCondCode();
2479     return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2480            CC == ARMCC::GE;
2481   }
2482 
isITCondCodeRestrictedU() const2483   bool isITCondCodeRestrictedU() const {
2484     if (!isITCondCode())
2485       return false;
2486     ARMCC::CondCodes CC = getCondCode();
2487     return CC == ARMCC::HS || CC == ARMCC::HI;
2488   }
2489 
isITCondCodeRestrictedFP() const2490   bool isITCondCodeRestrictedFP() const {
2491     if (!isITCondCode())
2492       return false;
2493     ARMCC::CondCodes CC = getCondCode();
2494     return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2495            CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2496   }
2497 
setVecListDPair(unsigned int DPair)2498   void setVecListDPair(unsigned int DPair) {
2499     Kind = k_VectorList;
2500     VectorList.RegNum = DPair;
2501     VectorList.Count = 2;
2502     VectorList.isDoubleSpaced = false;
2503   }
2504 
setVecListOneD(unsigned int DReg)2505   void setVecListOneD(unsigned int DReg) {
2506     Kind = k_VectorList;
2507     VectorList.RegNum = DReg;
2508     VectorList.Count = 1;
2509     VectorList.isDoubleSpaced = false;
2510   }
2511 
addExpr(MCInst & Inst,const MCExpr * Expr) const2512   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2513     // Add as immediates when possible.  Null MCExpr = 0.
2514     if (!Expr)
2515       Inst.addOperand(MCOperand::createImm(0));
2516     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2517       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2518     else
2519       Inst.addOperand(MCOperand::createExpr(Expr));
2520   }
2521 
addARMBranchTargetOperands(MCInst & Inst,unsigned N) const2522   void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2523     assert(N == 1 && "Invalid number of operands!");
2524     addExpr(Inst, getImm());
2525   }
2526 
addThumbBranchTargetOperands(MCInst & Inst,unsigned N) const2527   void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2528     assert(N == 1 && "Invalid number of operands!");
2529     addExpr(Inst, getImm());
2530   }
2531 
addCondCodeOperands(MCInst & Inst,unsigned N) const2532   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2533     assert(N == 2 && "Invalid number of operands!");
2534     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2535     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2536     Inst.addOperand(MCOperand::createReg(RegNum));
2537   }
2538 
addVPTPredNOperands(MCInst & Inst,unsigned N) const2539   void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2540     assert(N == 3 && "Invalid number of operands!");
2541     Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2542     unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2543     Inst.addOperand(MCOperand::createReg(RegNum));
2544     Inst.addOperand(MCOperand::createReg(0));
2545   }
2546 
addVPTPredROperands(MCInst & Inst,unsigned N) const2547   void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2548     assert(N == 4 && "Invalid number of operands!");
2549     addVPTPredNOperands(Inst, N-1);
2550     unsigned RegNum;
2551     if (getVPTPred() == ARMVCC::None) {
2552       RegNum = 0;
2553     } else {
2554       unsigned NextOpIndex = Inst.getNumOperands();
2555       auto &MCID = Parser->getInstrDesc(Inst.getOpcode());
2556       int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2557       assert(TiedOp >= 0 &&
2558              "Inactive register in vpred_r is not tied to an output!");
2559       RegNum = Inst.getOperand(TiedOp).getReg();
2560     }
2561     Inst.addOperand(MCOperand::createReg(RegNum));
2562   }
2563 
addCoprocNumOperands(MCInst & Inst,unsigned N) const2564   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2565     assert(N == 1 && "Invalid number of operands!");
2566     Inst.addOperand(MCOperand::createImm(getCoproc()));
2567   }
2568 
addCoprocRegOperands(MCInst & Inst,unsigned N) const2569   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2570     assert(N == 1 && "Invalid number of operands!");
2571     Inst.addOperand(MCOperand::createImm(getCoproc()));
2572   }
2573 
addCoprocOptionOperands(MCInst & Inst,unsigned N) const2574   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2575     assert(N == 1 && "Invalid number of operands!");
2576     Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2577   }
2578 
addITMaskOperands(MCInst & Inst,unsigned N) const2579   void addITMaskOperands(MCInst &Inst, unsigned N) const {
2580     assert(N == 1 && "Invalid number of operands!");
2581     Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2582   }
2583 
addITCondCodeOperands(MCInst & Inst,unsigned N) const2584   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2585     assert(N == 1 && "Invalid number of operands!");
2586     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2587   }
2588 
addITCondCodeInvOperands(MCInst & Inst,unsigned N) const2589   void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2590     assert(N == 1 && "Invalid number of operands!");
2591     Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2592   }
2593 
addCCOutOperands(MCInst & Inst,unsigned N) const2594   void addCCOutOperands(MCInst &Inst, unsigned N) const {
2595     assert(N == 1 && "Invalid number of operands!");
2596     Inst.addOperand(MCOperand::createReg(getReg()));
2597   }
2598 
addRegOperands(MCInst & Inst,unsigned N) const2599   void addRegOperands(MCInst &Inst, unsigned N) const {
2600     assert(N == 1 && "Invalid number of operands!");
2601     Inst.addOperand(MCOperand::createReg(getReg()));
2602   }
2603 
addRegShiftedRegOperands(MCInst & Inst,unsigned N) const2604   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2605     assert(N == 3 && "Invalid number of operands!");
2606     assert(isRegShiftedReg() &&
2607            "addRegShiftedRegOperands() on non-RegShiftedReg!");
2608     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2609     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2610     Inst.addOperand(MCOperand::createImm(
2611       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2612   }
2613 
addRegShiftedImmOperands(MCInst & Inst,unsigned N) const2614   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2615     assert(N == 2 && "Invalid number of operands!");
2616     assert(isRegShiftedImm() &&
2617            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2618     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2619     // Shift of #32 is encoded as 0 where permitted
2620     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2621     Inst.addOperand(MCOperand::createImm(
2622       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2623   }
2624 
addShifterImmOperands(MCInst & Inst,unsigned N) const2625   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2626     assert(N == 1 && "Invalid number of operands!");
2627     Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2628                                          ShifterImm.Imm));
2629   }
2630 
addRegListOperands(MCInst & Inst,unsigned N) const2631   void addRegListOperands(MCInst &Inst, unsigned N) const {
2632     assert(N == 1 && "Invalid number of operands!");
2633     const SmallVectorImpl<unsigned> &RegList = getRegList();
2634     for (unsigned Reg : RegList)
2635       Inst.addOperand(MCOperand::createReg(Reg));
2636   }
2637 
addRegListWithAPSROperands(MCInst & Inst,unsigned N) const2638   void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2639     assert(N == 1 && "Invalid number of operands!");
2640     const SmallVectorImpl<unsigned> &RegList = getRegList();
2641     for (unsigned Reg : RegList)
2642       Inst.addOperand(MCOperand::createReg(Reg));
2643   }
2644 
addDPRRegListOperands(MCInst & Inst,unsigned N) const2645   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2646     addRegListOperands(Inst, N);
2647   }
2648 
addSPRRegListOperands(MCInst & Inst,unsigned N) const2649   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2650     addRegListOperands(Inst, N);
2651   }
2652 
addFPSRegListWithVPROperands(MCInst & Inst,unsigned N) const2653   void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2654     addRegListOperands(Inst, N);
2655   }
2656 
addFPDRegListWithVPROperands(MCInst & Inst,unsigned N) const2657   void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2658     addRegListOperands(Inst, N);
2659   }
2660 
addRotImmOperands(MCInst & Inst,unsigned N) const2661   void addRotImmOperands(MCInst &Inst, unsigned N) const {
2662     assert(N == 1 && "Invalid number of operands!");
2663     // Encoded as val>>3. The printer handles display as 8, 16, 24.
2664     Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2665   }
2666 
addModImmOperands(MCInst & Inst,unsigned N) const2667   void addModImmOperands(MCInst &Inst, unsigned N) const {
2668     assert(N == 1 && "Invalid number of operands!");
2669 
2670     // Support for fixups (MCFixup)
2671     if (isImm())
2672       return addImmOperands(Inst, N);
2673 
2674     Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2675   }
2676 
addModImmNotOperands(MCInst & Inst,unsigned N) const2677   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2678     assert(N == 1 && "Invalid number of operands!");
2679     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2680     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2681     Inst.addOperand(MCOperand::createImm(Enc));
2682   }
2683 
addModImmNegOperands(MCInst & Inst,unsigned N) const2684   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2685     assert(N == 1 && "Invalid number of operands!");
2686     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2687     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2688     Inst.addOperand(MCOperand::createImm(Enc));
2689   }
2690 
addThumbModImmNeg8_255Operands(MCInst & Inst,unsigned N) const2691   void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2692     assert(N == 1 && "Invalid number of operands!");
2693     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2694     uint32_t Val = -CE->getValue();
2695     Inst.addOperand(MCOperand::createImm(Val));
2696   }
2697 
addThumbModImmNeg1_7Operands(MCInst & Inst,unsigned N) const2698   void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2699     assert(N == 1 && "Invalid number of operands!");
2700     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2701     uint32_t Val = -CE->getValue();
2702     Inst.addOperand(MCOperand::createImm(Val));
2703   }
2704 
addBitfieldOperands(MCInst & Inst,unsigned N) const2705   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2706     assert(N == 1 && "Invalid number of operands!");
2707     // Munge the lsb/width into a bitfield mask.
2708     unsigned lsb = Bitfield.LSB;
2709     unsigned width = Bitfield.Width;
2710     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2711     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2712                       (32 - (lsb + width)));
2713     Inst.addOperand(MCOperand::createImm(Mask));
2714   }
2715 
addImmOperands(MCInst & Inst,unsigned N) const2716   void addImmOperands(MCInst &Inst, unsigned N) const {
2717     assert(N == 1 && "Invalid number of operands!");
2718     addExpr(Inst, getImm());
2719   }
2720 
addFBits16Operands(MCInst & Inst,unsigned N) const2721   void addFBits16Operands(MCInst &Inst, unsigned N) const {
2722     assert(N == 1 && "Invalid number of operands!");
2723     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2724     Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2725   }
2726 
addFBits32Operands(MCInst & Inst,unsigned N) const2727   void addFBits32Operands(MCInst &Inst, unsigned N) const {
2728     assert(N == 1 && "Invalid number of operands!");
2729     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2730     Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2731   }
2732 
addFPImmOperands(MCInst & Inst,unsigned N) const2733   void addFPImmOperands(MCInst &Inst, unsigned N) const {
2734     assert(N == 1 && "Invalid number of operands!");
2735     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2736     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2737     Inst.addOperand(MCOperand::createImm(Val));
2738   }
2739 
addImm8s4Operands(MCInst & Inst,unsigned N) const2740   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2741     assert(N == 1 && "Invalid number of operands!");
2742     // FIXME: We really want to scale the value here, but the LDRD/STRD
2743     // instruction don't encode operands that way yet.
2744     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2745     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2746   }
2747 
addImm7s4Operands(MCInst & Inst,unsigned N) const2748   void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2749     assert(N == 1 && "Invalid number of operands!");
2750     // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2751     // instruction don't encode operands that way yet.
2752     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2753     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2754   }
2755 
addImm7Shift0Operands(MCInst & Inst,unsigned N) const2756   void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
2757     assert(N == 1 && "Invalid number of operands!");
2758     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2759     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2760   }
2761 
addImm7Shift1Operands(MCInst & Inst,unsigned N) const2762   void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
2763     assert(N == 1 && "Invalid number of operands!");
2764     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2765     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2766   }
2767 
addImm7Shift2Operands(MCInst & Inst,unsigned N) const2768   void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
2769     assert(N == 1 && "Invalid number of operands!");
2770     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2771     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2772   }
2773 
addImm7Operands(MCInst & Inst,unsigned N) const2774   void addImm7Operands(MCInst &Inst, unsigned N) const {
2775     assert(N == 1 && "Invalid number of operands!");
2776     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2777     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2778   }
2779 
addImm0_1020s4Operands(MCInst & Inst,unsigned N) const2780   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2781     assert(N == 1 && "Invalid number of operands!");
2782     // The immediate is scaled by four in the encoding and is stored
2783     // in the MCInst as such. Lop off the low two bits here.
2784     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2785     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2786   }
2787 
addImm0_508s4NegOperands(MCInst & Inst,unsigned N) const2788   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2789     assert(N == 1 && "Invalid number of operands!");
2790     // The immediate is scaled by four in the encoding and is stored
2791     // in the MCInst as such. Lop off the low two bits here.
2792     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2793     Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2794   }
2795 
addImm0_508s4Operands(MCInst & Inst,unsigned N) const2796   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2797     assert(N == 1 && "Invalid number of operands!");
2798     // The immediate is scaled by four in the encoding and is stored
2799     // in the MCInst as such. Lop off the low two bits here.
2800     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2801     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2802   }
2803 
addImm1_16Operands(MCInst & Inst,unsigned N) const2804   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2805     assert(N == 1 && "Invalid number of operands!");
2806     // The constant encodes as the immediate-1, and we store in the instruction
2807     // the bits as encoded, so subtract off one here.
2808     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2809     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2810   }
2811 
addImm1_32Operands(MCInst & Inst,unsigned N) const2812   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2813     assert(N == 1 && "Invalid number of operands!");
2814     // The constant encodes as the immediate-1, and we store in the instruction
2815     // the bits as encoded, so subtract off one here.
2816     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2817     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2818   }
2819 
addImmThumbSROperands(MCInst & Inst,unsigned N) const2820   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2821     assert(N == 1 && "Invalid number of operands!");
2822     // The constant encodes as the immediate, except for 32, which encodes as
2823     // zero.
2824     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2825     unsigned Imm = CE->getValue();
2826     Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2827   }
2828 
addPKHASRImmOperands(MCInst & Inst,unsigned N) const2829   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2830     assert(N == 1 && "Invalid number of operands!");
2831     // An ASR value of 32 encodes as 0, so that's how we want to add it to
2832     // the instruction as well.
2833     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2834     int Val = CE->getValue();
2835     Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2836   }
2837 
addT2SOImmNotOperands(MCInst & Inst,unsigned N) const2838   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2839     assert(N == 1 && "Invalid number of operands!");
2840     // The operand is actually a t2_so_imm, but we have its bitwise
2841     // negation in the assembly source, so twiddle it here.
2842     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2843     Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2844   }
2845 
addT2SOImmNegOperands(MCInst & Inst,unsigned N) const2846   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2847     assert(N == 1 && "Invalid number of operands!");
2848     // The operand is actually a t2_so_imm, but we have its
2849     // negation in the assembly source, so twiddle it here.
2850     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2851     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2852   }
2853 
addImm0_4095NegOperands(MCInst & Inst,unsigned N) const2854   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2855     assert(N == 1 && "Invalid number of operands!");
2856     // The operand is actually an imm0_4095, but we have its
2857     // negation in the assembly source, so twiddle it here.
2858     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2859     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2860   }
2861 
addUnsignedOffset_b8s2Operands(MCInst & Inst,unsigned N) const2862   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2863     if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2864       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2865       return;
2866     }
2867     const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2868     Inst.addOperand(MCOperand::createExpr(SR));
2869   }
2870 
addThumbMemPCOperands(MCInst & Inst,unsigned N) const2871   void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2872     assert(N == 1 && "Invalid number of operands!");
2873     if (isImm()) {
2874       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2875       if (CE) {
2876         Inst.addOperand(MCOperand::createImm(CE->getValue()));
2877         return;
2878       }
2879       const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2880       Inst.addOperand(MCOperand::createExpr(SR));
2881       return;
2882     }
2883 
2884     assert(isGPRMem()  && "Unknown value type!");
2885     assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2886     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2887       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2888     else
2889       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2890   }
2891 
addMemBarrierOptOperands(MCInst & Inst,unsigned N) const2892   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2893     assert(N == 1 && "Invalid number of operands!");
2894     Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2895   }
2896 
addInstSyncBarrierOptOperands(MCInst & Inst,unsigned N) const2897   void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2898     assert(N == 1 && "Invalid number of operands!");
2899     Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2900   }
2901 
addTraceSyncBarrierOptOperands(MCInst & Inst,unsigned N) const2902   void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2903     assert(N == 1 && "Invalid number of operands!");
2904     Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2905   }
2906 
addMemNoOffsetOperands(MCInst & Inst,unsigned N) const2907   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2908     assert(N == 1 && "Invalid number of operands!");
2909     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2910   }
2911 
addMemNoOffsetT2Operands(MCInst & Inst,unsigned N) const2912   void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2913     assert(N == 1 && "Invalid number of operands!");
2914     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2915   }
2916 
addMemNoOffsetT2NoSpOperands(MCInst & Inst,unsigned N) const2917   void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
2918     assert(N == 1 && "Invalid number of operands!");
2919     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2920   }
2921 
addMemNoOffsetTOperands(MCInst & Inst,unsigned N) const2922   void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
2923     assert(N == 1 && "Invalid number of operands!");
2924     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2925   }
2926 
addMemPCRelImm12Operands(MCInst & Inst,unsigned N) const2927   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2928     assert(N == 1 && "Invalid number of operands!");
2929     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2930       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2931     else
2932       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2933   }
2934 
addAdrLabelOperands(MCInst & Inst,unsigned N) const2935   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2936     assert(N == 1 && "Invalid number of operands!");
2937     assert(isImm() && "Not an immediate!");
2938 
2939     // If we have an immediate that's not a constant, treat it as a label
2940     // reference needing a fixup.
2941     if (!isa<MCConstantExpr>(getImm())) {
2942       Inst.addOperand(MCOperand::createExpr(getImm()));
2943       return;
2944     }
2945 
2946     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2947     int Val = CE->getValue();
2948     Inst.addOperand(MCOperand::createImm(Val));
2949   }
2950 
addAlignedMemoryOperands(MCInst & Inst,unsigned N) const2951   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2952     assert(N == 2 && "Invalid number of operands!");
2953     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2954     Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2955   }
2956 
addDupAlignedMemoryNoneOperands(MCInst & Inst,unsigned N) const2957   void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2958     addAlignedMemoryOperands(Inst, N);
2959   }
2960 
addAlignedMemoryNoneOperands(MCInst & Inst,unsigned N) const2961   void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2962     addAlignedMemoryOperands(Inst, N);
2963   }
2964 
addAlignedMemory16Operands(MCInst & Inst,unsigned N) const2965   void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2966     addAlignedMemoryOperands(Inst, N);
2967   }
2968 
addDupAlignedMemory16Operands(MCInst & Inst,unsigned N) const2969   void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2970     addAlignedMemoryOperands(Inst, N);
2971   }
2972 
addAlignedMemory32Operands(MCInst & Inst,unsigned N) const2973   void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2974     addAlignedMemoryOperands(Inst, N);
2975   }
2976 
addDupAlignedMemory32Operands(MCInst & Inst,unsigned N) const2977   void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2978     addAlignedMemoryOperands(Inst, N);
2979   }
2980 
addAlignedMemory64Operands(MCInst & Inst,unsigned N) const2981   void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2982     addAlignedMemoryOperands(Inst, N);
2983   }
2984 
addDupAlignedMemory64Operands(MCInst & Inst,unsigned N) const2985   void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2986     addAlignedMemoryOperands(Inst, N);
2987   }
2988 
addAlignedMemory64or128Operands(MCInst & Inst,unsigned N) const2989   void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2990     addAlignedMemoryOperands(Inst, N);
2991   }
2992 
addDupAlignedMemory64or128Operands(MCInst & Inst,unsigned N) const2993   void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2994     addAlignedMemoryOperands(Inst, N);
2995   }
2996 
addAlignedMemory64or128or256Operands(MCInst & Inst,unsigned N) const2997   void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2998     addAlignedMemoryOperands(Inst, N);
2999   }
3000 
addAddrMode2Operands(MCInst & Inst,unsigned N) const3001   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
3002     assert(N == 3 && "Invalid number of operands!");
3003     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3004     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3005     if (!Memory.OffsetRegNum) {
3006       if (!Memory.OffsetImm)
3007         Inst.addOperand(MCOperand::createImm(0));
3008       else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3009         int32_t Val = CE->getValue();
3010         ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3011         // Special case for #-0
3012         if (Val == std::numeric_limits<int32_t>::min())
3013           Val = 0;
3014         if (Val < 0)
3015           Val = -Val;
3016         Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
3017         Inst.addOperand(MCOperand::createImm(Val));
3018       } else
3019         Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3020     } else {
3021       // For register offset, we encode the shift type and negation flag
3022       // here.
3023       int32_t Val =
3024           ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
3025                             Memory.ShiftImm, Memory.ShiftType);
3026       Inst.addOperand(MCOperand::createImm(Val));
3027     }
3028   }
3029 
addAM2OffsetImmOperands(MCInst & Inst,unsigned N) const3030   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
3031     assert(N == 2 && "Invalid number of operands!");
3032     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3033     assert(CE && "non-constant AM2OffsetImm operand!");
3034     int32_t Val = CE->getValue();
3035     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3036     // Special case for #-0
3037     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3038     if (Val < 0) Val = -Val;
3039     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
3040     Inst.addOperand(MCOperand::createReg(0));
3041     Inst.addOperand(MCOperand::createImm(Val));
3042   }
3043 
addAddrMode3Operands(MCInst & Inst,unsigned N) const3044   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
3045     assert(N == 3 && "Invalid number of operands!");
3046     // If we have an immediate that's not a constant, treat it as a label
3047     // reference needing a fixup. If it is a constant, it's something else
3048     // and we reject it.
3049     if (isImm()) {
3050       Inst.addOperand(MCOperand::createExpr(getImm()));
3051       Inst.addOperand(MCOperand::createReg(0));
3052       Inst.addOperand(MCOperand::createImm(0));
3053       return;
3054     }
3055 
3056     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3057     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3058     if (!Memory.OffsetRegNum) {
3059       if (!Memory.OffsetImm)
3060         Inst.addOperand(MCOperand::createImm(0));
3061       else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3062         int32_t Val = CE->getValue();
3063         ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3064         // Special case for #-0
3065         if (Val == std::numeric_limits<int32_t>::min())
3066           Val = 0;
3067         if (Val < 0)
3068           Val = -Val;
3069         Val = ARM_AM::getAM3Opc(AddSub, Val);
3070         Inst.addOperand(MCOperand::createImm(Val));
3071       } else
3072         Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3073     } else {
3074       // For register offset, we encode the shift type and negation flag
3075       // here.
3076       int32_t Val =
3077           ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
3078       Inst.addOperand(MCOperand::createImm(Val));
3079     }
3080   }
3081 
addAM3OffsetOperands(MCInst & Inst,unsigned N) const3082   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3083     assert(N == 2 && "Invalid number of operands!");
3084     if (Kind == k_PostIndexRegister) {
3085       int32_t Val =
3086         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3087       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3088       Inst.addOperand(MCOperand::createImm(Val));
3089       return;
3090     }
3091 
3092     // Constant offset.
3093     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3094     int32_t Val = CE->getValue();
3095     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3096     // Special case for #-0
3097     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3098     if (Val < 0) Val = -Val;
3099     Val = ARM_AM::getAM3Opc(AddSub, Val);
3100     Inst.addOperand(MCOperand::createReg(0));
3101     Inst.addOperand(MCOperand::createImm(Val));
3102   }
3103 
addAddrMode5Operands(MCInst & Inst,unsigned N) const3104   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3105     assert(N == 2 && "Invalid number of operands!");
3106     // If we have an immediate that's not a constant, treat it as a label
3107     // reference needing a fixup. If it is a constant, it's something else
3108     // and we reject it.
3109     if (isImm()) {
3110       Inst.addOperand(MCOperand::createExpr(getImm()));
3111       Inst.addOperand(MCOperand::createImm(0));
3112       return;
3113     }
3114 
3115     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3116     if (!Memory.OffsetImm)
3117       Inst.addOperand(MCOperand::createImm(0));
3118     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3119       // The lower two bits are always zero and as such are not encoded.
3120       int32_t Val = CE->getValue() / 4;
3121       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3122       // Special case for #-0
3123       if (Val == std::numeric_limits<int32_t>::min())
3124         Val = 0;
3125       if (Val < 0)
3126         Val = -Val;
3127       Val = ARM_AM::getAM5Opc(AddSub, Val);
3128       Inst.addOperand(MCOperand::createImm(Val));
3129     } else
3130       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3131   }
3132 
addAddrMode5FP16Operands(MCInst & Inst,unsigned N) const3133   void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3134     assert(N == 2 && "Invalid number of operands!");
3135     // If we have an immediate that's not a constant, treat it as a label
3136     // reference needing a fixup. If it is a constant, it's something else
3137     // and we reject it.
3138     if (isImm()) {
3139       Inst.addOperand(MCOperand::createExpr(getImm()));
3140       Inst.addOperand(MCOperand::createImm(0));
3141       return;
3142     }
3143 
3144     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3145     // The lower bit is always zero and as such is not encoded.
3146     if (!Memory.OffsetImm)
3147       Inst.addOperand(MCOperand::createImm(0));
3148     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3149       int32_t Val = CE->getValue() / 2;
3150       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3151       // Special case for #-0
3152       if (Val == std::numeric_limits<int32_t>::min())
3153         Val = 0;
3154       if (Val < 0)
3155         Val = -Val;
3156       Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3157       Inst.addOperand(MCOperand::createImm(Val));
3158     } else
3159       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3160   }
3161 
addMemImm8s4OffsetOperands(MCInst & Inst,unsigned N) const3162   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3163     assert(N == 2 && "Invalid number of operands!");
3164     // If we have an immediate that's not a constant, treat it as a label
3165     // reference needing a fixup. If it is a constant, it's something else
3166     // and we reject it.
3167     if (isImm()) {
3168       Inst.addOperand(MCOperand::createExpr(getImm()));
3169       Inst.addOperand(MCOperand::createImm(0));
3170       return;
3171     }
3172 
3173     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3174     addExpr(Inst, Memory.OffsetImm);
3175   }
3176 
addMemImm7s4OffsetOperands(MCInst & Inst,unsigned N) const3177   void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3178     assert(N == 2 && "Invalid number of operands!");
3179     // If we have an immediate that's not a constant, treat it as a label
3180     // reference needing a fixup. If it is a constant, it's something else
3181     // and we reject it.
3182     if (isImm()) {
3183       Inst.addOperand(MCOperand::createExpr(getImm()));
3184       Inst.addOperand(MCOperand::createImm(0));
3185       return;
3186     }
3187 
3188     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3189     addExpr(Inst, Memory.OffsetImm);
3190   }
3191 
addMemImm0_1020s4OffsetOperands(MCInst & Inst,unsigned N) const3192   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3193     assert(N == 2 && "Invalid number of operands!");
3194     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3195     if (!Memory.OffsetImm)
3196       Inst.addOperand(MCOperand::createImm(0));
3197     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3198       // The lower two bits are always zero and as such are not encoded.
3199       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3200     else
3201       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3202   }
3203 
addMemImmOffsetOperands(MCInst & Inst,unsigned N) const3204   void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3205     assert(N == 2 && "Invalid number of operands!");
3206     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3207     addExpr(Inst, Memory.OffsetImm);
3208   }
3209 
addMemRegRQOffsetOperands(MCInst & Inst,unsigned N) const3210   void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3211     assert(N == 2 && "Invalid number of operands!");
3212     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3213     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3214   }
3215 
addMemUImm12OffsetOperands(MCInst & Inst,unsigned N) const3216   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3217     assert(N == 2 && "Invalid number of operands!");
3218     // If this is an immediate, it's a label reference.
3219     if (isImm()) {
3220       addExpr(Inst, getImm());
3221       Inst.addOperand(MCOperand::createImm(0));
3222       return;
3223     }
3224 
3225     // Otherwise, it's a normal memory reg+offset.
3226     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3227     addExpr(Inst, Memory.OffsetImm);
3228   }
3229 
addMemImm12OffsetOperands(MCInst & Inst,unsigned N) const3230   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3231     assert(N == 2 && "Invalid number of operands!");
3232     // If this is an immediate, it's a label reference.
3233     if (isImm()) {
3234       addExpr(Inst, getImm());
3235       Inst.addOperand(MCOperand::createImm(0));
3236       return;
3237     }
3238 
3239     // Otherwise, it's a normal memory reg+offset.
3240     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3241     addExpr(Inst, Memory.OffsetImm);
3242   }
3243 
addConstPoolAsmImmOperands(MCInst & Inst,unsigned N) const3244   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3245     assert(N == 1 && "Invalid number of operands!");
3246     // This is container for the immediate that we will create the constant
3247     // pool from
3248     addExpr(Inst, getConstantPoolImm());
3249   }
3250 
addMemTBBOperands(MCInst & Inst,unsigned N) const3251   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3252     assert(N == 2 && "Invalid number of operands!");
3253     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3254     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3255   }
3256 
addMemTBHOperands(MCInst & Inst,unsigned N) const3257   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3258     assert(N == 2 && "Invalid number of operands!");
3259     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3260     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3261   }
3262 
addMemRegOffsetOperands(MCInst & Inst,unsigned N) const3263   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3264     assert(N == 3 && "Invalid number of operands!");
3265     unsigned Val =
3266       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
3267                         Memory.ShiftImm, Memory.ShiftType);
3268     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3269     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3270     Inst.addOperand(MCOperand::createImm(Val));
3271   }
3272 
addT2MemRegOffsetOperands(MCInst & Inst,unsigned N) const3273   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3274     assert(N == 3 && "Invalid number of operands!");
3275     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3276     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3277     Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3278   }
3279 
addMemThumbRROperands(MCInst & Inst,unsigned N) const3280   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3281     assert(N == 2 && "Invalid number of operands!");
3282     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3283     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3284   }
3285 
addMemThumbRIs4Operands(MCInst & Inst,unsigned N) const3286   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3287     assert(N == 2 && "Invalid number of operands!");
3288     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3289     if (!Memory.OffsetImm)
3290       Inst.addOperand(MCOperand::createImm(0));
3291     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3292       // The lower two bits are always zero and as such are not encoded.
3293       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3294     else
3295       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3296   }
3297 
addMemThumbRIs2Operands(MCInst & Inst,unsigned N) const3298   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3299     assert(N == 2 && "Invalid number of operands!");
3300     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3301     if (!Memory.OffsetImm)
3302       Inst.addOperand(MCOperand::createImm(0));
3303     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3304       Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3305     else
3306       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3307   }
3308 
addMemThumbRIs1Operands(MCInst & Inst,unsigned N) const3309   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3310     assert(N == 2 && "Invalid number of operands!");
3311     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3312     addExpr(Inst, Memory.OffsetImm);
3313   }
3314 
addMemThumbSPIOperands(MCInst & Inst,unsigned N) const3315   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3316     assert(N == 2 && "Invalid number of operands!");
3317     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3318     if (!Memory.OffsetImm)
3319       Inst.addOperand(MCOperand::createImm(0));
3320     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3321       // The lower two bits are always zero and as such are not encoded.
3322       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3323     else
3324       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3325   }
3326 
addPostIdxImm8Operands(MCInst & Inst,unsigned N) const3327   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3328     assert(N == 1 && "Invalid number of operands!");
3329     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3330     assert(CE && "non-constant post-idx-imm8 operand!");
3331     int Imm = CE->getValue();
3332     bool isAdd = Imm >= 0;
3333     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3334     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3335     Inst.addOperand(MCOperand::createImm(Imm));
3336   }
3337 
addPostIdxImm8s4Operands(MCInst & Inst,unsigned N) const3338   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3339     assert(N == 1 && "Invalid number of operands!");
3340     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3341     assert(CE && "non-constant post-idx-imm8s4 operand!");
3342     int Imm = CE->getValue();
3343     bool isAdd = Imm >= 0;
3344     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3345     // Immediate is scaled by 4.
3346     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3347     Inst.addOperand(MCOperand::createImm(Imm));
3348   }
3349 
addPostIdxRegOperands(MCInst & Inst,unsigned N) const3350   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3351     assert(N == 2 && "Invalid number of operands!");
3352     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3353     Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3354   }
3355 
addPostIdxRegShiftedOperands(MCInst & Inst,unsigned N) const3356   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3357     assert(N == 2 && "Invalid number of operands!");
3358     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3359     // The sign, shift type, and shift amount are encoded in a single operand
3360     // using the AM2 encoding helpers.
3361     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3362     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3363                                      PostIdxReg.ShiftTy);
3364     Inst.addOperand(MCOperand::createImm(Imm));
3365   }
3366 
addPowerTwoOperands(MCInst & Inst,unsigned N) const3367   void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3368     assert(N == 1 && "Invalid number of operands!");
3369     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3370     Inst.addOperand(MCOperand::createImm(CE->getValue()));
3371   }
3372 
addMSRMaskOperands(MCInst & Inst,unsigned N) const3373   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3374     assert(N == 1 && "Invalid number of operands!");
3375     Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3376   }
3377 
addBankedRegOperands(MCInst & Inst,unsigned N) const3378   void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3379     assert(N == 1 && "Invalid number of operands!");
3380     Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3381   }
3382 
addProcIFlagsOperands(MCInst & Inst,unsigned N) const3383   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3384     assert(N == 1 && "Invalid number of operands!");
3385     Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3386   }
3387 
addVecListOperands(MCInst & Inst,unsigned N) const3388   void addVecListOperands(MCInst &Inst, unsigned N) const {
3389     assert(N == 1 && "Invalid number of operands!");
3390 
3391     if (isAnyVectorList())
3392       Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3393     else if (isDReg() && !Parser->hasMVE()) {
3394       Inst.addOperand(MCOperand::createReg(Reg.RegNum));
3395     } else if (isQReg() && !Parser->hasMVE()) {
3396       auto DPair = Parser->getDRegFromQReg(Reg.RegNum);
3397       DPair = Parser->getMRI()->getMatchingSuperReg(
3398           DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3399       Inst.addOperand(MCOperand::createReg(DPair));
3400     } else {
3401       LLVM_DEBUG(dbgs() << "TYPE: " << Kind << "\n");
3402       llvm_unreachable(
3403           "attempted to add a vector list register with wrong type!");
3404     }
3405   }
3406 
addMVEVecListOperands(MCInst & Inst,unsigned N) const3407   void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3408     assert(N == 1 && "Invalid number of operands!");
3409 
3410     // When we come here, the VectorList field will identify a range
3411     // of q-registers by its base register and length, and it will
3412     // have already been error-checked to be the expected length of
3413     // range and contain only q-regs in the range q0-q7. So we can
3414     // count on the base register being in the range q0-q6 (for 2
3415     // regs) or q0-q4 (for 4)
3416     //
3417     // The MVE instructions taking a register range of this kind will
3418     // need an operand in the MQQPR or MQQQQPR class, representing the
3419     // entire range as a unit. So we must translate into that class,
3420     // by finding the index of the base register in the MQPR reg
3421     // class, and returning the super-register at the corresponding
3422     // index in the target class.
3423 
3424     const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3425     const MCRegisterClass *RC_out =
3426         (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3427                                 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3428 
3429     unsigned I, E = RC_out->getNumRegs();
3430     for (I = 0; I < E; I++)
3431       if (RC_in->getRegister(I) == VectorList.RegNum)
3432         break;
3433     assert(I < E && "Invalid vector list start register!");
3434 
3435     Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
3436   }
3437 
addVecListIndexedOperands(MCInst & Inst,unsigned N) const3438   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3439     assert(N == 2 && "Invalid number of operands!");
3440     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3441     Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3442   }
3443 
addVectorIndex8Operands(MCInst & Inst,unsigned N) const3444   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3445     assert(N == 1 && "Invalid number of operands!");
3446     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3447   }
3448 
addVectorIndex16Operands(MCInst & Inst,unsigned N) const3449   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3450     assert(N == 1 && "Invalid number of operands!");
3451     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3452   }
3453 
addVectorIndex32Operands(MCInst & Inst,unsigned N) const3454   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3455     assert(N == 1 && "Invalid number of operands!");
3456     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3457   }
3458 
addVectorIndex64Operands(MCInst & Inst,unsigned N) const3459   void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3460     assert(N == 1 && "Invalid number of operands!");
3461     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3462   }
3463 
addMVEVectorIndexOperands(MCInst & Inst,unsigned N) const3464   void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3465     assert(N == 1 && "Invalid number of operands!");
3466     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3467   }
3468 
addMVEPairVectorIndexOperands(MCInst & Inst,unsigned N) const3469   void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3470     assert(N == 1 && "Invalid number of operands!");
3471     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3472   }
3473 
addNEONi8splatOperands(MCInst & Inst,unsigned N) const3474   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3475     assert(N == 1 && "Invalid number of operands!");
3476     // The immediate encodes the type of constant as well as the value.
3477     // Mask in that this is an i8 splat.
3478     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3479     Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3480   }
3481 
addNEONi16splatOperands(MCInst & Inst,unsigned N) const3482   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3483     assert(N == 1 && "Invalid number of operands!");
3484     // The immediate encodes the type of constant as well as the value.
3485     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3486     unsigned Value = CE->getValue();
3487     Value = ARM_AM::encodeNEONi16splat(Value);
3488     Inst.addOperand(MCOperand::createImm(Value));
3489   }
3490 
addNEONi16splatNotOperands(MCInst & Inst,unsigned N) const3491   void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3492     assert(N == 1 && "Invalid number of operands!");
3493     // The immediate encodes the type of constant as well as the value.
3494     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3495     unsigned Value = CE->getValue();
3496     Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
3497     Inst.addOperand(MCOperand::createImm(Value));
3498   }
3499 
addNEONi32splatOperands(MCInst & Inst,unsigned N) const3500   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3501     assert(N == 1 && "Invalid number of operands!");
3502     // The immediate encodes the type of constant as well as the value.
3503     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3504     unsigned Value = CE->getValue();
3505     Value = ARM_AM::encodeNEONi32splat(Value);
3506     Inst.addOperand(MCOperand::createImm(Value));
3507   }
3508 
addNEONi32splatNotOperands(MCInst & Inst,unsigned N) const3509   void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3510     assert(N == 1 && "Invalid number of operands!");
3511     // The immediate encodes the type of constant as well as the value.
3512     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3513     unsigned Value = CE->getValue();
3514     Value = ARM_AM::encodeNEONi32splat(~Value);
3515     Inst.addOperand(MCOperand::createImm(Value));
3516   }
3517 
addNEONi8ReplicateOperands(MCInst & Inst,bool Inv) const3518   void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3519     // The immediate encodes the type of constant as well as the value.
3520     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3521     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3522             Inst.getOpcode() == ARM::VMOVv16i8) &&
3523           "All instructions that wants to replicate non-zero byte "
3524           "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3525     unsigned Value = CE->getValue();
3526     if (Inv)
3527       Value = ~Value;
3528     unsigned B = Value & 0xff;
3529     B |= 0xe00; // cmode = 0b1110
3530     Inst.addOperand(MCOperand::createImm(B));
3531   }
3532 
addNEONinvi8ReplicateOperands(MCInst & Inst,unsigned N) const3533   void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3534     assert(N == 1 && "Invalid number of operands!");
3535     addNEONi8ReplicateOperands(Inst, true);
3536   }
3537 
encodeNeonVMOVImmediate(unsigned Value)3538   static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3539     if (Value >= 256 && Value <= 0xffff)
3540       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3541     else if (Value > 0xffff && Value <= 0xffffff)
3542       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3543     else if (Value > 0xffffff)
3544       Value = (Value >> 24) | 0x600;
3545     return Value;
3546   }
3547 
addNEONi32vmovOperands(MCInst & Inst,unsigned N) const3548   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3549     assert(N == 1 && "Invalid number of operands!");
3550     // The immediate encodes the type of constant as well as the value.
3551     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3552     unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3553     Inst.addOperand(MCOperand::createImm(Value));
3554   }
3555 
addNEONvmovi8ReplicateOperands(MCInst & Inst,unsigned N) const3556   void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3557     assert(N == 1 && "Invalid number of operands!");
3558     addNEONi8ReplicateOperands(Inst, false);
3559   }
3560 
addNEONvmovi16ReplicateOperands(MCInst & Inst,unsigned N) const3561   void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3562     assert(N == 1 && "Invalid number of operands!");
3563     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3564     assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3565             Inst.getOpcode() == ARM::VMOVv8i16 ||
3566             Inst.getOpcode() == ARM::VMVNv4i16 ||
3567             Inst.getOpcode() == ARM::VMVNv8i16) &&
3568           "All instructions that want to replicate non-zero half-word "
3569           "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3570     uint64_t Value = CE->getValue();
3571     unsigned Elem = Value & 0xffff;
3572     if (Elem >= 256)
3573       Elem = (Elem >> 8) | 0x200;
3574     Inst.addOperand(MCOperand::createImm(Elem));
3575   }
3576 
addNEONi32vmovNegOperands(MCInst & Inst,unsigned N) const3577   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3578     assert(N == 1 && "Invalid number of operands!");
3579     // The immediate encodes the type of constant as well as the value.
3580     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3581     unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3582     Inst.addOperand(MCOperand::createImm(Value));
3583   }
3584 
addNEONvmovi32ReplicateOperands(MCInst & Inst,unsigned N) const3585   void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3586     assert(N == 1 && "Invalid number of operands!");
3587     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3588     assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3589             Inst.getOpcode() == ARM::VMOVv4i32 ||
3590             Inst.getOpcode() == ARM::VMVNv2i32 ||
3591             Inst.getOpcode() == ARM::VMVNv4i32) &&
3592           "All instructions that want to replicate non-zero word "
3593           "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3594     uint64_t Value = CE->getValue();
3595     unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3596     Inst.addOperand(MCOperand::createImm(Elem));
3597   }
3598 
addNEONi64splatOperands(MCInst & Inst,unsigned N) const3599   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3600     assert(N == 1 && "Invalid number of operands!");
3601     // The immediate encodes the type of constant as well as the value.
3602     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3603     uint64_t Value = CE->getValue();
3604     unsigned Imm = 0;
3605     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3606       Imm |= (Value & 1) << i;
3607     }
3608     Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3609   }
3610 
addComplexRotationEvenOperands(MCInst & Inst,unsigned N) const3611   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3612     assert(N == 1 && "Invalid number of operands!");
3613     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3614     Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3615   }
3616 
addComplexRotationOddOperands(MCInst & Inst,unsigned N) const3617   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3618     assert(N == 1 && "Invalid number of operands!");
3619     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3620     Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3621   }
3622 
addMveSaturateOperands(MCInst & Inst,unsigned N) const3623   void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3624     assert(N == 1 && "Invalid number of operands!");
3625     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3626     unsigned Imm = CE->getValue();
3627     assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3628     Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3629   }
3630 
3631   void print(raw_ostream &OS) const override;
3632 
CreateITMask(unsigned Mask,SMLoc S,ARMAsmParser & Parser)3633   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S,
3634                                                   ARMAsmParser &Parser) {
3635     auto Op = std::make_unique<ARMOperand>(k_ITCondMask, Parser);
3636     Op->ITMask.Mask = Mask;
3637     Op->StartLoc = S;
3638     Op->EndLoc = S;
3639     return Op;
3640   }
3641 
3642   static std::unique_ptr<ARMOperand>
CreateCondCode(ARMCC::CondCodes CC,SMLoc S,ARMAsmParser & Parser)3643   CreateCondCode(ARMCC::CondCodes CC, SMLoc S, ARMAsmParser &Parser) {
3644     auto Op = std::make_unique<ARMOperand>(k_CondCode, Parser);
3645     Op->CC.Val = CC;
3646     Op->StartLoc = S;
3647     Op->EndLoc = S;
3648     return Op;
3649   }
3650 
CreateVPTPred(ARMVCC::VPTCodes CC,SMLoc S,ARMAsmParser & Parser)3651   static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC, SMLoc S,
3652                                                    ARMAsmParser &Parser) {
3653     auto Op = std::make_unique<ARMOperand>(k_VPTPred, Parser);
3654     Op->VCC.Val = CC;
3655     Op->StartLoc = S;
3656     Op->EndLoc = S;
3657     return Op;
3658   }
3659 
CreateCoprocNum(unsigned CopVal,SMLoc S,ARMAsmParser & Parser)3660   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S,
3661                                                      ARMAsmParser &Parser) {
3662     auto Op = std::make_unique<ARMOperand>(k_CoprocNum, Parser);
3663     Op->Cop.Val = CopVal;
3664     Op->StartLoc = S;
3665     Op->EndLoc = S;
3666     return Op;
3667   }
3668 
CreateCoprocReg(unsigned CopVal,SMLoc S,ARMAsmParser & Parser)3669   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S,
3670                                                      ARMAsmParser &Parser) {
3671     auto Op = std::make_unique<ARMOperand>(k_CoprocReg, Parser);
3672     Op->Cop.Val = CopVal;
3673     Op->StartLoc = S;
3674     Op->EndLoc = S;
3675     return Op;
3676   }
3677 
3678   static std::unique_ptr<ARMOperand>
CreateCoprocOption(unsigned Val,SMLoc S,SMLoc E,ARMAsmParser & Parser)3679   CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3680     auto Op = std::make_unique<ARMOperand>(k_CoprocOption, Parser);
3681     Op->Cop.Val = Val;
3682     Op->StartLoc = S;
3683     Op->EndLoc = E;
3684     return Op;
3685   }
3686 
CreateCCOut(unsigned RegNum,SMLoc S,ARMAsmParser & Parser)3687   static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S,
3688                                                  ARMAsmParser &Parser) {
3689     auto Op = std::make_unique<ARMOperand>(k_CCOut, Parser);
3690     Op->Reg.RegNum = RegNum;
3691     Op->StartLoc = S;
3692     Op->EndLoc = S;
3693     return Op;
3694   }
3695 
CreateToken(StringRef Str,SMLoc S,ARMAsmParser & Parser)3696   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S,
3697                                                  ARMAsmParser &Parser) {
3698     auto Op = std::make_unique<ARMOperand>(k_Token, Parser);
3699     Op->Tok.Data = Str.data();
3700     Op->Tok.Length = Str.size();
3701     Op->StartLoc = S;
3702     Op->EndLoc = S;
3703     return Op;
3704   }
3705 
CreateReg(unsigned RegNum,SMLoc S,SMLoc E,ARMAsmParser & Parser)3706   static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3707                                                SMLoc E, ARMAsmParser &Parser) {
3708     auto Op = std::make_unique<ARMOperand>(k_Register, Parser);
3709     Op->Reg.RegNum = RegNum;
3710     Op->StartLoc = S;
3711     Op->EndLoc = E;
3712     return Op;
3713   }
3714 
3715   static std::unique_ptr<ARMOperand>
CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,unsigned SrcReg,unsigned ShiftReg,unsigned ShiftImm,SMLoc S,SMLoc E,ARMAsmParser & Parser)3716   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3717                         unsigned ShiftReg, unsigned ShiftImm, SMLoc S, SMLoc E,
3718                         ARMAsmParser &Parser) {
3719     auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister, Parser);
3720     Op->RegShiftedReg.ShiftTy = ShTy;
3721     Op->RegShiftedReg.SrcReg = SrcReg;
3722     Op->RegShiftedReg.ShiftReg = ShiftReg;
3723     Op->RegShiftedReg.ShiftImm = ShiftImm;
3724     Op->StartLoc = S;
3725     Op->EndLoc = E;
3726     return Op;
3727   }
3728 
3729   static std::unique_ptr<ARMOperand>
CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,unsigned SrcReg,unsigned ShiftImm,SMLoc S,SMLoc E,ARMAsmParser & Parser)3730   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3731                          unsigned ShiftImm, SMLoc S, SMLoc E,
3732                          ARMAsmParser &Parser) {
3733     auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate, Parser);
3734     Op->RegShiftedImm.ShiftTy = ShTy;
3735     Op->RegShiftedImm.SrcReg = SrcReg;
3736     Op->RegShiftedImm.ShiftImm = ShiftImm;
3737     Op->StartLoc = S;
3738     Op->EndLoc = E;
3739     return Op;
3740   }
3741 
CreateShifterImm(bool isASR,unsigned Imm,SMLoc S,SMLoc E,ARMAsmParser & Parser)3742   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3743                                                       SMLoc S, SMLoc E,
3744                                                       ARMAsmParser &Parser) {
3745     auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate, Parser);
3746     Op->ShifterImm.isASR = isASR;
3747     Op->ShifterImm.Imm = Imm;
3748     Op->StartLoc = S;
3749     Op->EndLoc = E;
3750     return Op;
3751   }
3752 
3753   static std::unique_ptr<ARMOperand>
CreateRotImm(unsigned Imm,SMLoc S,SMLoc E,ARMAsmParser & Parser)3754   CreateRotImm(unsigned Imm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3755     auto Op = std::make_unique<ARMOperand>(k_RotateImmediate, Parser);
3756     Op->RotImm.Imm = Imm;
3757     Op->StartLoc = S;
3758     Op->EndLoc = E;
3759     return Op;
3760   }
3761 
CreateModImm(unsigned Bits,unsigned Rot,SMLoc S,SMLoc E,ARMAsmParser & Parser)3762   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3763                                                   SMLoc S, SMLoc E,
3764                                                   ARMAsmParser &Parser) {
3765     auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate, Parser);
3766     Op->ModImm.Bits = Bits;
3767     Op->ModImm.Rot = Rot;
3768     Op->StartLoc = S;
3769     Op->EndLoc = E;
3770     return Op;
3771   }
3772 
3773   static std::unique_ptr<ARMOperand>
CreateConstantPoolImm(const MCExpr * Val,SMLoc S,SMLoc E,ARMAsmParser & Parser)3774   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E,
3775                         ARMAsmParser &Parser) {
3776     auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate, Parser);
3777     Op->Imm.Val = Val;
3778     Op->StartLoc = S;
3779     Op->EndLoc = E;
3780     return Op;
3781   }
3782 
CreateBitfield(unsigned LSB,unsigned Width,SMLoc S,SMLoc E,ARMAsmParser & Parser)3783   static std::unique_ptr<ARMOperand> CreateBitfield(unsigned LSB,
3784                                                     unsigned Width, SMLoc S,
3785                                                     SMLoc E,
3786                                                     ARMAsmParser &Parser) {
3787     auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor, Parser);
3788     Op->Bitfield.LSB = LSB;
3789     Op->Bitfield.Width = Width;
3790     Op->StartLoc = S;
3791     Op->EndLoc = E;
3792     return Op;
3793   }
3794 
3795   static std::unique_ptr<ARMOperand>
CreateRegList(SmallVectorImpl<std::pair<unsigned,unsigned>> & Regs,SMLoc StartLoc,SMLoc EndLoc,ARMAsmParser & Parser)3796   CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3797                 SMLoc StartLoc, SMLoc EndLoc, ARMAsmParser &Parser) {
3798     assert(Regs.size() > 0 && "RegList contains no registers?");
3799     KindTy Kind = k_RegisterList;
3800 
3801     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3802             Regs.front().second)) {
3803       if (Regs.back().second == ARM::VPR)
3804         Kind = k_FPDRegisterListWithVPR;
3805       else
3806         Kind = k_DPRRegisterList;
3807     } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3808                    Regs.front().second)) {
3809       if (Regs.back().second == ARM::VPR)
3810         Kind = k_FPSRegisterListWithVPR;
3811       else
3812         Kind = k_SPRRegisterList;
3813     }
3814 
3815     if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3816       Kind = k_RegisterListWithAPSR;
3817 
3818     assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
3819 
3820     auto Op = std::make_unique<ARMOperand>(Kind, Parser);
3821     for (const auto &P : Regs)
3822       Op->Registers.push_back(P.second);
3823 
3824     Op->StartLoc = StartLoc;
3825     Op->EndLoc = EndLoc;
3826     return Op;
3827   }
3828 
3829   static std::unique_ptr<ARMOperand>
CreateVectorList(unsigned RegNum,unsigned Count,bool isDoubleSpaced,SMLoc S,SMLoc E,ARMAsmParser & Parser)3830   CreateVectorList(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3831                    SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3832     auto Op = std::make_unique<ARMOperand>(k_VectorList, Parser);
3833     Op->VectorList.RegNum = RegNum;
3834     Op->VectorList.Count = Count;
3835     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3836     Op->StartLoc = S;
3837     Op->EndLoc = E;
3838     return Op;
3839   }
3840 
3841   static std::unique_ptr<ARMOperand>
CreateVectorListAllLanes(unsigned RegNum,unsigned Count,bool isDoubleSpaced,SMLoc S,SMLoc E,ARMAsmParser & Parser)3842   CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3843                            SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3844     auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes, Parser);
3845     Op->VectorList.RegNum = RegNum;
3846     Op->VectorList.Count = Count;
3847     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3848     Op->StartLoc = S;
3849     Op->EndLoc = E;
3850     return Op;
3851   }
3852 
3853   static std::unique_ptr<ARMOperand>
CreateVectorListIndexed(unsigned RegNum,unsigned Count,unsigned Index,bool isDoubleSpaced,SMLoc S,SMLoc E,ARMAsmParser & Parser)3854   CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3855                           bool isDoubleSpaced, SMLoc S, SMLoc E,
3856                           ARMAsmParser &Parser) {
3857     auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed, Parser);
3858     Op->VectorList.RegNum = RegNum;
3859     Op->VectorList.Count = Count;
3860     Op->VectorList.LaneIndex = Index;
3861     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3862     Op->StartLoc = S;
3863     Op->EndLoc = E;
3864     return Op;
3865   }
3866 
CreateVectorIndex(unsigned Idx,SMLoc S,SMLoc E,MCContext & Ctx,ARMAsmParser & Parser)3867   static std::unique_ptr<ARMOperand> CreateVectorIndex(unsigned Idx, SMLoc S,
3868                                                        SMLoc E, MCContext &Ctx,
3869                                                        ARMAsmParser &Parser) {
3870     auto Op = std::make_unique<ARMOperand>(k_VectorIndex, Parser);
3871     Op->VectorIndex.Val = Idx;
3872     Op->StartLoc = S;
3873     Op->EndLoc = E;
3874     return Op;
3875   }
3876 
CreateImm(const MCExpr * Val,SMLoc S,SMLoc E,ARMAsmParser & Parser)3877   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3878                                                SMLoc E, ARMAsmParser &Parser) {
3879     auto Op = std::make_unique<ARMOperand>(k_Immediate, Parser);
3880     Op->Imm.Val = Val;
3881     Op->StartLoc = S;
3882     Op->EndLoc = E;
3883     return Op;
3884   }
3885 
3886   static std::unique_ptr<ARMOperand>
CreateMem(unsigned BaseRegNum,const MCExpr * OffsetImm,unsigned OffsetRegNum,ARM_AM::ShiftOpc ShiftType,unsigned ShiftImm,unsigned Alignment,bool isNegative,SMLoc S,SMLoc E,ARMAsmParser & Parser,SMLoc AlignmentLoc=SMLoc ())3887   CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3888             ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3889             bool isNegative, SMLoc S, SMLoc E, ARMAsmParser &Parser,
3890             SMLoc AlignmentLoc = SMLoc()) {
3891     auto Op = std::make_unique<ARMOperand>(k_Memory, Parser);
3892     Op->Memory.BaseRegNum = BaseRegNum;
3893     Op->Memory.OffsetImm = OffsetImm;
3894     Op->Memory.OffsetRegNum = OffsetRegNum;
3895     Op->Memory.ShiftType = ShiftType;
3896     Op->Memory.ShiftImm = ShiftImm;
3897     Op->Memory.Alignment = Alignment;
3898     Op->Memory.isNegative = isNegative;
3899     Op->StartLoc = S;
3900     Op->EndLoc = E;
3901     Op->AlignmentLoc = AlignmentLoc;
3902     return Op;
3903   }
3904 
3905   static std::unique_ptr<ARMOperand>
CreatePostIdxReg(unsigned RegNum,bool isAdd,ARM_AM::ShiftOpc ShiftTy,unsigned ShiftImm,SMLoc S,SMLoc E,ARMAsmParser & Parser)3906   CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3907                    unsigned ShiftImm, SMLoc S, SMLoc E, ARMAsmParser &Parser) {
3908     auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister, Parser);
3909     Op->PostIdxReg.RegNum = RegNum;
3910     Op->PostIdxReg.isAdd = isAdd;
3911     Op->PostIdxReg.ShiftTy = ShiftTy;
3912     Op->PostIdxReg.ShiftImm = ShiftImm;
3913     Op->StartLoc = S;
3914     Op->EndLoc = E;
3915     return Op;
3916   }
3917 
3918   static std::unique_ptr<ARMOperand>
CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,SMLoc S,ARMAsmParser & Parser)3919   CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S, ARMAsmParser &Parser) {
3920     auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt, Parser);
3921     Op->MBOpt.Val = Opt;
3922     Op->StartLoc = S;
3923     Op->EndLoc = S;
3924     return Op;
3925   }
3926 
3927   static std::unique_ptr<ARMOperand>
CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt,SMLoc S,ARMAsmParser & Parser)3928   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S,
3929                            ARMAsmParser &Parser) {
3930     auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt, Parser);
3931     Op->ISBOpt.Val = Opt;
3932     Op->StartLoc = S;
3933     Op->EndLoc = S;
3934     return Op;
3935   }
3936 
3937   static std::unique_ptr<ARMOperand>
CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt,SMLoc S,ARMAsmParser & Parser)3938   CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S,
3939                             ARMAsmParser &Parser) {
3940     auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt, Parser);
3941     Op->TSBOpt.Val = Opt;
3942     Op->StartLoc = S;
3943     Op->EndLoc = S;
3944     return Op;
3945   }
3946 
3947   static std::unique_ptr<ARMOperand>
CreateProcIFlags(ARM_PROC::IFlags IFlags,SMLoc S,ARMAsmParser & Parser)3948   CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S, ARMAsmParser &Parser) {
3949     auto Op = std::make_unique<ARMOperand>(k_ProcIFlags, Parser);
3950     Op->IFlags.Val = IFlags;
3951     Op->StartLoc = S;
3952     Op->EndLoc = S;
3953     return Op;
3954   }
3955 
CreateMSRMask(unsigned MMask,SMLoc S,ARMAsmParser & Parser)3956   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S,
3957                                                    ARMAsmParser &Parser) {
3958     auto Op = std::make_unique<ARMOperand>(k_MSRMask, Parser);
3959     Op->MMask.Val = MMask;
3960     Op->StartLoc = S;
3961     Op->EndLoc = S;
3962     return Op;
3963   }
3964 
CreateBankedReg(unsigned Reg,SMLoc S,ARMAsmParser & Parser)3965   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S,
3966                                                      ARMAsmParser &Parser) {
3967     auto Op = std::make_unique<ARMOperand>(k_BankedReg, Parser);
3968     Op->BankedReg.Val = Reg;
3969     Op->StartLoc = S;
3970     Op->EndLoc = S;
3971     return Op;
3972   }
3973 };
3974 
3975 } // end anonymous namespace.
3976 
print(raw_ostream & OS) const3977 void ARMOperand::print(raw_ostream &OS) const {
3978   auto RegName = [](MCRegister Reg) {
3979     if (Reg)
3980       return ARMInstPrinter::getRegisterName(Reg);
3981     else
3982       return "noreg";
3983   };
3984 
3985   switch (Kind) {
3986   case k_CondCode:
3987     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3988     break;
3989   case k_VPTPred:
3990     OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3991     break;
3992   case k_CCOut:
3993     OS << "<ccout " << RegName(getReg()) << ">";
3994     break;
3995   case k_ITCondMask: {
3996     static const char *const MaskStr[] = {
3997       "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3998       "(tt)",      "(ttet)", "(tte)", "(ttee)",
3999       "(t)",       "(tett)", "(tet)", "(tete)",
4000       "(te)",      "(teet)", "(tee)", "(teee)",
4001     };
4002     assert((ITMask.Mask & 0xf) == ITMask.Mask);
4003     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
4004     break;
4005   }
4006   case k_CoprocNum:
4007     OS << "<coprocessor number: " << getCoproc() << ">";
4008     break;
4009   case k_CoprocReg:
4010     OS << "<coprocessor register: " << getCoproc() << ">";
4011     break;
4012   case k_CoprocOption:
4013     OS << "<coprocessor option: " << CoprocOption.Val << ">";
4014     break;
4015   case k_MSRMask:
4016     OS << "<mask: " << getMSRMask() << ">";
4017     break;
4018   case k_BankedReg:
4019     OS << "<banked reg: " << getBankedReg() << ">";
4020     break;
4021   case k_Immediate:
4022     OS << *getImm();
4023     break;
4024   case k_MemBarrierOpt:
4025     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
4026     break;
4027   case k_InstSyncBarrierOpt:
4028     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
4029     break;
4030   case k_TraceSyncBarrierOpt:
4031     OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
4032     break;
4033   case k_Memory:
4034     OS << "<memory";
4035     if (Memory.BaseRegNum)
4036       OS << " base:" << RegName(Memory.BaseRegNum);
4037     if (Memory.OffsetImm)
4038       OS << " offset-imm:" << *Memory.OffsetImm;
4039     if (Memory.OffsetRegNum)
4040       OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
4041          << RegName(Memory.OffsetRegNum);
4042     if (Memory.ShiftType != ARM_AM::no_shift) {
4043       OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
4044       OS << " shift-imm:" << Memory.ShiftImm;
4045     }
4046     if (Memory.Alignment)
4047       OS << " alignment:" << Memory.Alignment;
4048     OS << ">";
4049     break;
4050   case k_PostIndexRegister:
4051     OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
4052        << RegName(PostIdxReg.RegNum);
4053     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
4054       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
4055          << PostIdxReg.ShiftImm;
4056     OS << ">";
4057     break;
4058   case k_ProcIFlags: {
4059     OS << "<ARM_PROC::";
4060     unsigned IFlags = getProcIFlags();
4061     for (int i=2; i >= 0; --i)
4062       if (IFlags & (1 << i))
4063         OS << ARM_PROC::IFlagsToString(1 << i);
4064     OS << ">";
4065     break;
4066   }
4067   case k_Register:
4068     OS << "<register " << RegName(getReg()) << ">";
4069     break;
4070   case k_ShifterImmediate:
4071     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
4072        << " #" << ShifterImm.Imm << ">";
4073     break;
4074   case k_ShiftedRegister:
4075     OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
4076        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
4077        << RegName(RegShiftedReg.ShiftReg) << ">";
4078     break;
4079   case k_ShiftedImmediate:
4080     OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
4081        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
4082        << RegShiftedImm.ShiftImm << ">";
4083     break;
4084   case k_RotateImmediate:
4085     OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
4086     break;
4087   case k_ModifiedImmediate:
4088     OS << "<mod_imm #" << ModImm.Bits << ", #"
4089        <<  ModImm.Rot << ")>";
4090     break;
4091   case k_ConstantPoolImmediate:
4092     OS << "<constant_pool_imm #" << *getConstantPoolImm();
4093     break;
4094   case k_BitfieldDescriptor:
4095     OS << "<bitfield " << "lsb: " << Bitfield.LSB
4096        << ", width: " << Bitfield.Width << ">";
4097     break;
4098   case k_RegisterList:
4099   case k_RegisterListWithAPSR:
4100   case k_DPRRegisterList:
4101   case k_SPRRegisterList:
4102   case k_FPSRegisterListWithVPR:
4103   case k_FPDRegisterListWithVPR: {
4104     OS << "<register_list ";
4105 
4106     const SmallVectorImpl<unsigned> &RegList = getRegList();
4107     for (SmallVectorImpl<unsigned>::const_iterator
4108            I = RegList.begin(), E = RegList.end(); I != E; ) {
4109       OS << RegName(*I);
4110       if (++I < E) OS << ", ";
4111     }
4112 
4113     OS << ">";
4114     break;
4115   }
4116   case k_VectorList:
4117     OS << "<vector_list " << VectorList.Count << " * "
4118        << RegName(VectorList.RegNum) << ">";
4119     break;
4120   case k_VectorListAllLanes:
4121     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4122        << RegName(VectorList.RegNum) << ">";
4123     break;
4124   case k_VectorListIndexed:
4125     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4126        << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4127     break;
4128   case k_Token:
4129     OS << "'" << getToken() << "'";
4130     break;
4131   case k_VectorIndex:
4132     OS << "<vectorindex " << getVectorIndex() << ">";
4133     break;
4134   }
4135 }
4136 
4137 /// @name Auto-generated Match Functions
4138 /// {
4139 
4140 static MCRegister MatchRegisterName(StringRef Name);
4141 
4142 /// }
4143 
isDataTypeToken(StringRef Tok)4144 static bool isDataTypeToken(StringRef Tok) {
4145   static const DenseSet<StringRef> DataTypes{
4146       ".8",  ".16",  ".32",  ".64",  ".i8", ".i16", ".i32", ".i64",
4147       ".u8", ".u16", ".u32", ".u64", ".s8", ".s16", ".s32", ".s64",
4148       ".p8", ".p16", ".f32", ".f64", ".f",  ".d"};
4149   return DataTypes.contains(Tok);
4150 }
4151 
getMnemonicOpsEndInd(const OperandVector & Operands)4152 static unsigned getMnemonicOpsEndInd(const OperandVector &Operands) {
4153   unsigned MnemonicOpsEndInd = 1;
4154   // Special case for CPS which has a Mnemonic side token for possibly storing
4155   // ie/id variant
4156   if (Operands[0]->isToken() &&
4157       static_cast<ARMOperand &>(*Operands[0]).getToken() == "cps") {
4158     if (Operands.size() > 1 && Operands[1]->isImm() &&
4159         static_cast<ARMOperand &>(*Operands[1]).getImm()->getKind() ==
4160             llvm::MCExpr::Constant &&
4161         (dyn_cast<MCConstantExpr>(
4162              static_cast<ARMOperand &>(*Operands[1]).getImm())
4163                  ->getValue() == ARM_PROC::IE ||
4164          dyn_cast<MCConstantExpr>(
4165              static_cast<ARMOperand &>(*Operands[1]).getImm())
4166                  ->getValue() == ARM_PROC::ID))
4167       ++MnemonicOpsEndInd;
4168   }
4169 
4170   // In some circumstances the condition code moves to the right
4171   bool RHSCondCode = false;
4172   while (MnemonicOpsEndInd < Operands.size()) {
4173     auto Op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
4174     // Special case for it instructions which have a condition code on the RHS
4175     if (Op.isITMask()) {
4176       RHSCondCode = true;
4177       MnemonicOpsEndInd++;
4178     } else if (Op.isToken() &&
4179                (
4180                    // There are several special cases not covered by
4181                    // isDataTypeToken
4182                    Op.getToken() == ".w" || Op.getToken() == ".bf16" ||
4183                    Op.getToken() == ".p64" || Op.getToken() == ".f16" ||
4184                    isDataTypeToken(Op.getToken()))) {
4185       // In the mnemonic operators the cond code must always precede the data
4186       // type. So we can now safely assume any subsequent cond code is on the
4187       // RHS. As is the case for VCMP and VPT.
4188       RHSCondCode = true;
4189       MnemonicOpsEndInd++;
4190     }
4191     // Skip all mnemonic operator types
4192     else if (Op.isCCOut() || (Op.isCondCode() && !RHSCondCode) ||
4193              Op.isVPTPred() || (Op.isToken() && Op.getToken() == ".w"))
4194       MnemonicOpsEndInd++;
4195     else
4196       break;
4197   }
4198   return MnemonicOpsEndInd;
4199 }
4200 
parseRegister(MCRegister & Reg,SMLoc & StartLoc,SMLoc & EndLoc)4201 bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
4202                                  SMLoc &EndLoc) {
4203   const AsmToken &Tok = getParser().getTok();
4204   StartLoc = Tok.getLoc();
4205   EndLoc = Tok.getEndLoc();
4206   Reg = tryParseRegister();
4207 
4208   return Reg == (unsigned)-1;
4209 }
4210 
tryParseRegister(MCRegister & Reg,SMLoc & StartLoc,SMLoc & EndLoc)4211 ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
4212                                            SMLoc &EndLoc) {
4213   if (parseRegister(Reg, StartLoc, EndLoc))
4214     return ParseStatus::NoMatch;
4215   return ParseStatus::Success;
4216 }
4217 
4218 /// Try to parse a register name.  The token must be an Identifier when called,
4219 /// and if it is a register name the token is eaten and the register number is
4220 /// returned.  Otherwise return -1.
tryParseRegister(bool AllowOutOfBoundReg)4221 int ARMAsmParser::tryParseRegister(bool AllowOutOfBoundReg) {
4222   MCAsmParser &Parser = getParser();
4223   const AsmToken &Tok = Parser.getTok();
4224   if (Tok.isNot(AsmToken::Identifier)) return -1;
4225 
4226   std::string lowerCase = Tok.getString().lower();
4227   unsigned RegNum = MatchRegisterName(lowerCase);
4228   if (!RegNum) {
4229     RegNum = StringSwitch<unsigned>(lowerCase)
4230       .Case("r13", ARM::SP)
4231       .Case("r14", ARM::LR)
4232       .Case("r15", ARM::PC)
4233       .Case("ip", ARM::R12)
4234       // Additional register name aliases for 'gas' compatibility.
4235       .Case("a1", ARM::R0)
4236       .Case("a2", ARM::R1)
4237       .Case("a3", ARM::R2)
4238       .Case("a4", ARM::R3)
4239       .Case("v1", ARM::R4)
4240       .Case("v2", ARM::R5)
4241       .Case("v3", ARM::R6)
4242       .Case("v4", ARM::R7)
4243       .Case("v5", ARM::R8)
4244       .Case("v6", ARM::R9)
4245       .Case("v7", ARM::R10)
4246       .Case("v8", ARM::R11)
4247       .Case("sb", ARM::R9)
4248       .Case("sl", ARM::R10)
4249       .Case("fp", ARM::R11)
4250       .Default(0);
4251   }
4252   if (!RegNum) {
4253     // Check for aliases registered via .req. Canonicalize to lower case.
4254     // That's more consistent since register names are case insensitive, and
4255     // it's how the original entry was passed in from MC/MCParser/AsmParser.
4256     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4257     // If no match, return failure.
4258     if (Entry == RegisterReqs.end())
4259       return -1;
4260     Parser.Lex(); // Eat identifier token.
4261     return Entry->getValue();
4262   }
4263 
4264   // Some FPUs only have 16 D registers, so D16-D31 are invalid
4265   if (!AllowOutOfBoundReg && !hasD32() && RegNum >= ARM::D16 &&
4266       RegNum <= ARM::D31)
4267     return -1;
4268 
4269   Parser.Lex(); // Eat identifier token.
4270 
4271   return RegNum;
4272 }
4273 
tryParseShiftToken()4274 std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
4275   MCAsmParser &Parser = getParser();
4276   const AsmToken &Tok = Parser.getTok();
4277   if (Tok.isNot(AsmToken::Identifier))
4278     return std::nullopt;
4279 
4280   std::string lowerCase = Tok.getString().lower();
4281   return StringSwitch<std::optional<ARM_AM::ShiftOpc>>(lowerCase)
4282       .Case("asl", ARM_AM::lsl)
4283       .Case("lsl", ARM_AM::lsl)
4284       .Case("lsr", ARM_AM::lsr)
4285       .Case("asr", ARM_AM::asr)
4286       .Case("ror", ARM_AM::ror)
4287       .Case("rrx", ARM_AM::rrx)
4288       .Default(std::nullopt);
4289 }
4290 
4291 // Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
4292 // If a recoverable error occurs, return 1. If an irrecoverable error
4293 // occurs, return -1. An irrecoverable error is one where tokens have been
4294 // consumed in the process of trying to parse the shifter (i.e., when it is
4295 // indeed a shifter operand, but malformed).
tryParseShiftRegister(OperandVector & Operands)4296 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4297   MCAsmParser &Parser = getParser();
4298   SMLoc S = Parser.getTok().getLoc();
4299 
4300   auto ShiftTyOpt = tryParseShiftToken();
4301   if (ShiftTyOpt == std::nullopt)
4302     return 1;
4303   auto ShiftTy = ShiftTyOpt.value();
4304 
4305   Parser.Lex(); // Eat the operator.
4306 
4307   // The source register for the shift has already been added to the
4308   // operand list, so we need to pop it off and combine it into the shifted
4309   // register operand instead.
4310   std::unique_ptr<ARMOperand> PrevOp(
4311       (ARMOperand *)Operands.pop_back_val().release());
4312   if (!PrevOp->isReg())
4313     return Error(PrevOp->getStartLoc(), "shift must be of a register");
4314   int SrcReg = PrevOp->getReg();
4315 
4316   SMLoc EndLoc;
4317   int64_t Imm = 0;
4318   int ShiftReg = 0;
4319   if (ShiftTy == ARM_AM::rrx) {
4320     // RRX Doesn't have an explicit shift amount. The encoder expects
4321     // the shift register to be the same as the source register. Seems odd,
4322     // but OK.
4323     ShiftReg = SrcReg;
4324   } else {
4325     // Figure out if this is shifted by a constant or a register (for non-RRX).
4326     if (Parser.getTok().is(AsmToken::Hash) ||
4327         Parser.getTok().is(AsmToken::Dollar)) {
4328       Parser.Lex(); // Eat hash.
4329       SMLoc ImmLoc = Parser.getTok().getLoc();
4330       const MCExpr *ShiftExpr = nullptr;
4331       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4332         Error(ImmLoc, "invalid immediate shift value");
4333         return -1;
4334       }
4335       // The expression must be evaluatable as an immediate.
4336       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4337       if (!CE) {
4338         Error(ImmLoc, "invalid immediate shift value");
4339         return -1;
4340       }
4341       // Range check the immediate.
4342       // lsl, ror: 0 <= imm <= 31
4343       // lsr, asr: 0 <= imm <= 32
4344       Imm = CE->getValue();
4345       if (Imm < 0 ||
4346           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4347           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4348         Error(ImmLoc, "immediate shift value out of range");
4349         return -1;
4350       }
4351       // shift by zero is a nop. Always send it through as lsl.
4352       // ('as' compatibility)
4353       if (Imm == 0)
4354         ShiftTy = ARM_AM::lsl;
4355     } else if (Parser.getTok().is(AsmToken::Identifier)) {
4356       SMLoc L = Parser.getTok().getLoc();
4357       EndLoc = Parser.getTok().getEndLoc();
4358       ShiftReg = tryParseRegister();
4359       if (ShiftReg == -1) {
4360         Error(L, "expected immediate or register in shift operand");
4361         return -1;
4362       }
4363     } else {
4364       Error(Parser.getTok().getLoc(),
4365             "expected immediate or register in shift operand");
4366       return -1;
4367     }
4368   }
4369 
4370   if (ShiftReg && ShiftTy != ARM_AM::rrx)
4371     Operands.push_back(ARMOperand::CreateShiftedRegister(
4372         ShiftTy, SrcReg, ShiftReg, Imm, S, EndLoc, *this));
4373   else
4374     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4375                                                           S, EndLoc, *this));
4376 
4377   return 0;
4378 }
4379 
4380 /// Try to parse a register name.  The token must be an Identifier when called.
4381 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
4382 /// if there is a "writeback". 'true' if it's not a register.
4383 ///
4384 /// TODO this is likely to change to allow different register types and or to
4385 /// parse for a specific register type.
tryParseRegisterWithWriteBack(OperandVector & Operands)4386 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4387   MCAsmParser &Parser = getParser();
4388   SMLoc RegStartLoc = Parser.getTok().getLoc();
4389   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4390   int RegNo = tryParseRegister();
4391   if (RegNo == -1)
4392     return true;
4393 
4394   Operands.push_back(
4395       ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc, *this));
4396 
4397   const AsmToken &ExclaimTok = Parser.getTok();
4398   if (ExclaimTok.is(AsmToken::Exclaim)) {
4399     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4400                                                ExclaimTok.getLoc(), *this));
4401     Parser.Lex(); // Eat exclaim token
4402     return false;
4403   }
4404 
4405   // Also check for an index operand. This is only legal for vector registers,
4406   // but that'll get caught OK in operand matching, so we don't need to
4407   // explicitly filter everything else out here.
4408   if (Parser.getTok().is(AsmToken::LBrac)) {
4409     SMLoc SIdx = Parser.getTok().getLoc();
4410     Parser.Lex(); // Eat left bracket token.
4411 
4412     const MCExpr *ImmVal;
4413     if (getParser().parseExpression(ImmVal))
4414       return true;
4415     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4416     if (!MCE)
4417       return TokError("immediate value expected for vector index");
4418 
4419     if (Parser.getTok().isNot(AsmToken::RBrac))
4420       return Error(Parser.getTok().getLoc(), "']' expected");
4421 
4422     SMLoc E = Parser.getTok().getEndLoc();
4423     Parser.Lex(); // Eat right bracket token.
4424 
4425     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), SIdx, E,
4426                                                      getContext(), *this));
4427   }
4428 
4429   return false;
4430 }
4431 
4432 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
4433 /// instruction with a symbolic operand name.
4434 /// We accept "crN" syntax for GAS compatibility.
4435 /// <operand-name> ::= <prefix><number>
4436 /// If CoprocOp is 'c', then:
4437 ///   <prefix> ::= c | cr
4438 /// If CoprocOp is 'p', then :
4439 ///   <prefix> ::= p
4440 /// <number> ::= integer in range [0, 15]
MatchCoprocessorOperandName(StringRef Name,char CoprocOp)4441 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4442   // Use the same layout as the tablegen'erated register name matcher. Ugly,
4443   // but efficient.
4444   if (Name.size() < 2 || Name[0] != CoprocOp)
4445     return -1;
4446   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4447 
4448   switch (Name.size()) {
4449   default: return -1;
4450   case 1:
4451     switch (Name[0]) {
4452     default:  return -1;
4453     case '0': return 0;
4454     case '1': return 1;
4455     case '2': return 2;
4456     case '3': return 3;
4457     case '4': return 4;
4458     case '5': return 5;
4459     case '6': return 6;
4460     case '7': return 7;
4461     case '8': return 8;
4462     case '9': return 9;
4463     }
4464   case 2:
4465     if (Name[0] != '1')
4466       return -1;
4467     switch (Name[1]) {
4468     default:  return -1;
4469     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4470     // However, old cores (v5/v6) did use them in that way.
4471     case '0': return 10;
4472     case '1': return 11;
4473     case '2': return 12;
4474     case '3': return 13;
4475     case '4': return 14;
4476     case '5': return 15;
4477     }
4478   }
4479 }
4480 
4481 /// parseITCondCode - Try to parse a condition code for an IT instruction.
parseITCondCode(OperandVector & Operands)4482 ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4483   MCAsmParser &Parser = getParser();
4484   SMLoc S = Parser.getTok().getLoc();
4485   const AsmToken &Tok = Parser.getTok();
4486   if (!Tok.is(AsmToken::Identifier))
4487     return ParseStatus::NoMatch;
4488   unsigned CC = ARMCondCodeFromString(Tok.getString());
4489   if (CC == ~0U)
4490     return ParseStatus::NoMatch;
4491   Parser.Lex(); // Eat the token.
4492 
4493   Operands.push_back(
4494       ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S, *this));
4495 
4496   return ParseStatus::Success;
4497 }
4498 
4499 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4500 /// token must be an Identifier when called, and if it is a coprocessor
4501 /// number, the token is eaten and the operand is added to the operand list.
parseCoprocNumOperand(OperandVector & Operands)4502 ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4503   MCAsmParser &Parser = getParser();
4504   SMLoc S = Parser.getTok().getLoc();
4505   const AsmToken &Tok = Parser.getTok();
4506   if (Tok.isNot(AsmToken::Identifier))
4507     return ParseStatus::NoMatch;
4508 
4509   int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4510   if (Num == -1)
4511     return ParseStatus::NoMatch;
4512   if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4513     return ParseStatus::NoMatch;
4514 
4515   Parser.Lex(); // Eat identifier token.
4516   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S, *this));
4517   return ParseStatus::Success;
4518 }
4519 
4520 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4521 /// token must be an Identifier when called, and if it is a coprocessor
4522 /// number, the token is eaten and the operand is added to the operand list.
parseCoprocRegOperand(OperandVector & Operands)4523 ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4524   MCAsmParser &Parser = getParser();
4525   SMLoc S = Parser.getTok().getLoc();
4526   const AsmToken &Tok = Parser.getTok();
4527   if (Tok.isNot(AsmToken::Identifier))
4528     return ParseStatus::NoMatch;
4529 
4530   int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4531   if (Reg == -1)
4532     return ParseStatus::NoMatch;
4533 
4534   Parser.Lex(); // Eat identifier token.
4535   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S, *this));
4536   return ParseStatus::Success;
4537 }
4538 
4539 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4540 /// coproc_option : '{' imm0_255 '}'
parseCoprocOptionOperand(OperandVector & Operands)4541 ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4542   MCAsmParser &Parser = getParser();
4543   SMLoc S = Parser.getTok().getLoc();
4544 
4545   // If this isn't a '{', this isn't a coprocessor immediate operand.
4546   if (Parser.getTok().isNot(AsmToken::LCurly))
4547     return ParseStatus::NoMatch;
4548   Parser.Lex(); // Eat the '{'
4549 
4550   const MCExpr *Expr;
4551   SMLoc Loc = Parser.getTok().getLoc();
4552   if (getParser().parseExpression(Expr))
4553     return Error(Loc, "illegal expression");
4554   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4555   if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4556     return Error(Loc,
4557                  "coprocessor option must be an immediate in range [0, 255]");
4558   int Val = CE->getValue();
4559 
4560   // Check for and consume the closing '}'
4561   if (Parser.getTok().isNot(AsmToken::RCurly))
4562     return ParseStatus::Failure;
4563   SMLoc E = Parser.getTok().getEndLoc();
4564   Parser.Lex(); // Eat the '}'
4565 
4566   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E, *this));
4567   return ParseStatus::Success;
4568 }
4569 
4570 // For register list parsing, we need to map from raw GPR register numbering
4571 // to the enumeration values. The enumeration values aren't sorted by
4572 // register number due to our using "sp", "lr" and "pc" as canonical names.
getNextRegister(unsigned Reg)4573 static unsigned getNextRegister(unsigned Reg) {
4574   // If this is a GPR, we need to do it manually, otherwise we can rely
4575   // on the sort ordering of the enumeration since the other reg-classes
4576   // are sane.
4577   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4578     return Reg + 1;
4579   switch(Reg) {
4580   default: llvm_unreachable("Invalid GPR number!");
4581   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
4582   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
4583   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
4584   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
4585   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
4586   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4587   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
4588   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
4589   }
4590 }
4591 
4592 // Insert an <Encoding, Register> pair in an ordered vector. Return true on
4593 // success, or false, if duplicate encoding found.
4594 static bool
insertNoDuplicates(SmallVectorImpl<std::pair<unsigned,unsigned>> & Regs,unsigned Enc,unsigned Reg)4595 insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4596                    unsigned Enc, unsigned Reg) {
4597   Regs.emplace_back(Enc, Reg);
4598   for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4599     if (J->first == Enc) {
4600       Regs.erase(J.base());
4601       return false;
4602     }
4603     if (J->first < Enc)
4604       break;
4605     std::swap(*I, *J);
4606   }
4607   return true;
4608 }
4609 
4610 /// Parse a register list.
parseRegisterList(OperandVector & Operands,bool EnforceOrder,bool AllowRAAC,bool AllowOutOfBoundReg)4611 bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4612                                      bool AllowRAAC, bool AllowOutOfBoundReg) {
4613   MCAsmParser &Parser = getParser();
4614   if (Parser.getTok().isNot(AsmToken::LCurly))
4615     return TokError("Token is not a Left Curly Brace");
4616   SMLoc S = Parser.getTok().getLoc();
4617   Parser.Lex(); // Eat '{' token.
4618   SMLoc RegLoc = Parser.getTok().getLoc();
4619 
4620   // Check the first register in the list to see what register class
4621   // this is a list of.
4622   int Reg = tryParseRegister();
4623   if (Reg == -1)
4624     return Error(RegLoc, "register expected");
4625   if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4626     return Error(RegLoc, "pseudo-register not allowed");
4627   // The reglist instructions have at most 16 registers, so reserve
4628   // space for that many.
4629   int EReg = 0;
4630   SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
4631 
4632   // Allow Q regs and just interpret them as the two D sub-registers.
4633   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4634     Reg = getDRegFromQReg(Reg);
4635     EReg = MRI->getEncodingValue(Reg);
4636     Registers.emplace_back(EReg, Reg);
4637     ++Reg;
4638   }
4639   const MCRegisterClass *RC;
4640   if (Reg == ARM::RA_AUTH_CODE ||
4641       ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4642     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4643   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4644     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4645   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4646     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4647   else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4648     RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4649   else
4650     return Error(RegLoc, "invalid register in register list");
4651 
4652   // Store the register.
4653   EReg = MRI->getEncodingValue(Reg);
4654   Registers.emplace_back(EReg, Reg);
4655 
4656   // This starts immediately after the first register token in the list,
4657   // so we can see either a comma or a minus (range separator) as a legal
4658   // next token.
4659   while (Parser.getTok().is(AsmToken::Comma) ||
4660          Parser.getTok().is(AsmToken::Minus)) {
4661     if (Parser.getTok().is(AsmToken::Minus)) {
4662       if (Reg == ARM::RA_AUTH_CODE)
4663         return Error(RegLoc, "pseudo-register not allowed");
4664       Parser.Lex(); // Eat the minus.
4665       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4666       int EndReg = tryParseRegister(AllowOutOfBoundReg);
4667       if (EndReg == -1)
4668         return Error(AfterMinusLoc, "register expected");
4669       if (EndReg == ARM::RA_AUTH_CODE)
4670         return Error(AfterMinusLoc, "pseudo-register not allowed");
4671       // Allow Q regs and just interpret them as the two D sub-registers.
4672       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4673         EndReg = getDRegFromQReg(EndReg) + 1;
4674       // If the register is the same as the start reg, there's nothing
4675       // more to do.
4676       if (Reg == EndReg)
4677         continue;
4678       // The register must be in the same register class as the first.
4679       if (!RC->contains(Reg))
4680         return Error(AfterMinusLoc, "invalid register in register list");
4681       // Ranges must go from low to high.
4682       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4683         return Error(AfterMinusLoc, "bad range in register list");
4684 
4685       // Add all the registers in the range to the register list.
4686       while (Reg != EndReg) {
4687         Reg = getNextRegister(Reg);
4688         EReg = MRI->getEncodingValue(Reg);
4689         if (!insertNoDuplicates(Registers, EReg, Reg)) {
4690           Warning(AfterMinusLoc, StringRef("duplicated register (") +
4691                                      ARMInstPrinter::getRegisterName(Reg) +
4692                                      ") in register list");
4693         }
4694       }
4695       continue;
4696     }
4697     Parser.Lex(); // Eat the comma.
4698     RegLoc = Parser.getTok().getLoc();
4699     int OldReg = Reg;
4700     const AsmToken RegTok = Parser.getTok();
4701     Reg = tryParseRegister(AllowOutOfBoundReg);
4702     if (Reg == -1)
4703       return Error(RegLoc, "register expected");
4704     if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4705       return Error(RegLoc, "pseudo-register not allowed");
4706     // Allow Q regs and just interpret them as the two D sub-registers.
4707     bool isQReg = false;
4708     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4709       Reg = getDRegFromQReg(Reg);
4710       isQReg = true;
4711     }
4712     if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4713         RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4714         ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4715       // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4716       // subset of GPRRegClassId except it contains APSR as well.
4717       RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4718     }
4719     if (Reg == ARM::VPR &&
4720         (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4721          RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4722          RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4723       RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4724       EReg = MRI->getEncodingValue(Reg);
4725       if (!insertNoDuplicates(Registers, EReg, Reg)) {
4726         Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4727                             ") in register list");
4728       }
4729       continue;
4730     }
4731     // The register must be in the same register class as the first.
4732     if ((Reg == ARM::RA_AUTH_CODE &&
4733          RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4734         (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4735       return Error(RegLoc, "invalid register in register list");
4736     // In most cases, the list must be monotonically increasing. An
4737     // exception is CLRM, which is order-independent anyway, so
4738     // there's no potential for confusion if you write clrm {r2,r1}
4739     // instead of clrm {r1,r2}.
4740     if (EnforceOrder &&
4741         MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4742       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4743         Warning(RegLoc, "register list not in ascending order");
4744       else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4745         return Error(RegLoc, "register list not in ascending order");
4746     }
4747     // VFP register lists must also be contiguous.
4748     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4749         RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4750         Reg != OldReg + 1)
4751       return Error(RegLoc, "non-contiguous register range");
4752     EReg = MRI->getEncodingValue(Reg);
4753     if (!insertNoDuplicates(Registers, EReg, Reg)) {
4754       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4755                           ") in register list");
4756     }
4757     if (isQReg) {
4758       EReg = MRI->getEncodingValue(++Reg);
4759       Registers.emplace_back(EReg, Reg);
4760     }
4761   }
4762 
4763   if (Parser.getTok().isNot(AsmToken::RCurly))
4764     return Error(Parser.getTok().getLoc(), "'}' expected");
4765   SMLoc E = Parser.getTok().getEndLoc();
4766   Parser.Lex(); // Eat '}' token.
4767 
4768   // Push the register list operand.
4769   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E, *this));
4770 
4771   // The ARM system instruction variants for LDM/STM have a '^' token here.
4772   if (Parser.getTok().is(AsmToken::Caret)) {
4773     Operands.push_back(
4774         ARMOperand::CreateToken("^", Parser.getTok().getLoc(), *this));
4775     Parser.Lex(); // Eat '^' token.
4776   }
4777 
4778   return false;
4779 }
4780 
4781 // Helper function to parse the lane index for vector lists.
parseVectorLane(VectorLaneTy & LaneKind,unsigned & Index,SMLoc & EndLoc)4782 ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4783                                           unsigned &Index, SMLoc &EndLoc) {
4784   MCAsmParser &Parser = getParser();
4785   Index = 0; // Always return a defined index value.
4786   if (Parser.getTok().is(AsmToken::LBrac)) {
4787     Parser.Lex(); // Eat the '['.
4788     if (Parser.getTok().is(AsmToken::RBrac)) {
4789       // "Dn[]" is the 'all lanes' syntax.
4790       LaneKind = AllLanes;
4791       EndLoc = Parser.getTok().getEndLoc();
4792       Parser.Lex(); // Eat the ']'.
4793       return ParseStatus::Success;
4794     }
4795 
4796     // There's an optional '#' token here. Normally there wouldn't be, but
4797     // inline assemble puts one in, and it's friendly to accept that.
4798     if (Parser.getTok().is(AsmToken::Hash))
4799       Parser.Lex(); // Eat '#' or '$'.
4800 
4801     const MCExpr *LaneIndex;
4802     SMLoc Loc = Parser.getTok().getLoc();
4803     if (getParser().parseExpression(LaneIndex))
4804       return Error(Loc, "illegal expression");
4805     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4806     if (!CE)
4807       return Error(Loc, "lane index must be empty or an integer");
4808     if (Parser.getTok().isNot(AsmToken::RBrac))
4809       return Error(Parser.getTok().getLoc(), "']' expected");
4810     EndLoc = Parser.getTok().getEndLoc();
4811     Parser.Lex(); // Eat the ']'.
4812     int64_t Val = CE->getValue();
4813 
4814     // FIXME: Make this range check context sensitive for .8, .16, .32.
4815     if (Val < 0 || Val > 7)
4816       return Error(Parser.getTok().getLoc(), "lane index out of range");
4817     Index = Val;
4818     LaneKind = IndexedLane;
4819     return ParseStatus::Success;
4820   }
4821   LaneKind = NoLanes;
4822   return ParseStatus::Success;
4823 }
4824 
4825 // parse a vector register list
parseVectorList(OperandVector & Operands)4826 ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
4827   MCAsmParser &Parser = getParser();
4828   VectorLaneTy LaneKind;
4829   unsigned LaneIndex;
4830   SMLoc S = Parser.getTok().getLoc();
4831   // As an extension (to match gas), support a plain D register or Q register
4832   // (without encosing curly braces) as a single or double entry list,
4833   // respectively.
4834   // If there is no lane supplied, just parse as a register and
4835   // use the custom matcher to convert to list if necessary
4836   if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4837     SMLoc E = Parser.getTok().getEndLoc();
4838     int Reg = tryParseRegister();
4839     if (Reg == -1)
4840       return ParseStatus::NoMatch;
4841     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4842       ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4843       if (!Res.isSuccess())
4844         return Res;
4845       switch (LaneKind) {
4846       case NoLanes:
4847         Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
4848         break;
4849       case AllLanes:
4850         Operands.push_back(
4851             ARMOperand::CreateVectorListAllLanes(Reg, 1, false, S, E, *this));
4852         break;
4853       case IndexedLane:
4854         Operands.push_back(ARMOperand::CreateVectorListIndexed(
4855             Reg, 1, LaneIndex, false, S, E, *this));
4856         break;
4857       }
4858       return ParseStatus::Success;
4859     }
4860     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4861       Reg = getDRegFromQReg(Reg);
4862       ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4863       if (!Res.isSuccess())
4864         return Res;
4865       switch (LaneKind) {
4866       case NoLanes:
4867         Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
4868         break;
4869       case AllLanes:
4870         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4871                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4872         Operands.push_back(
4873             ARMOperand::CreateVectorListAllLanes(Reg, 2, false, S, E, *this));
4874         break;
4875       case IndexedLane:
4876         Operands.push_back(ARMOperand::CreateVectorListIndexed(
4877             Reg, 2, LaneIndex, false, S, E, *this));
4878         break;
4879       }
4880       return ParseStatus::Success;
4881     }
4882     Operands.push_back(ARMOperand::CreateReg(Reg, S, E, *this));
4883     return ParseStatus::Success;
4884   }
4885 
4886   if (Parser.getTok().isNot(AsmToken::LCurly))
4887     return ParseStatus::NoMatch;
4888 
4889   Parser.Lex(); // Eat '{' token.
4890   SMLoc RegLoc = Parser.getTok().getLoc();
4891 
4892   int Reg = tryParseRegister();
4893   if (Reg == -1)
4894     return Error(RegLoc, "register expected");
4895   unsigned Count = 1;
4896   int Spacing = 0;
4897   unsigned FirstReg = Reg;
4898 
4899   if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4900     return Error(Parser.getTok().getLoc(),
4901                  "vector register in range Q0-Q7 expected");
4902   // The list is of D registers, but we also allow Q regs and just interpret
4903   // them as the two D sub-registers.
4904   else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4905     FirstReg = Reg = getDRegFromQReg(Reg);
4906     Spacing = 1; // double-spacing requires explicit D registers, otherwise
4907                  // it's ambiguous with four-register single spaced.
4908     ++Reg;
4909     ++Count;
4910   }
4911 
4912   SMLoc E;
4913   if (!parseVectorLane(LaneKind, LaneIndex, E).isSuccess())
4914     return ParseStatus::Failure;
4915 
4916   while (Parser.getTok().is(AsmToken::Comma) ||
4917          Parser.getTok().is(AsmToken::Minus)) {
4918     if (Parser.getTok().is(AsmToken::Minus)) {
4919       if (!Spacing)
4920         Spacing = 1; // Register range implies a single spaced list.
4921       else if (Spacing == 2)
4922         return Error(Parser.getTok().getLoc(),
4923                      "sequential registers in double spaced list");
4924       Parser.Lex(); // Eat the minus.
4925       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4926       int EndReg = tryParseRegister();
4927       if (EndReg == -1)
4928         return Error(AfterMinusLoc, "register expected");
4929       // Allow Q regs and just interpret them as the two D sub-registers.
4930       if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4931         EndReg = getDRegFromQReg(EndReg) + 1;
4932       // If the register is the same as the start reg, there's nothing
4933       // more to do.
4934       if (Reg == EndReg)
4935         continue;
4936       // The register must be in the same register class as the first.
4937       if ((hasMVE() &&
4938            !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4939           (!hasMVE() &&
4940            !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)))
4941         return Error(AfterMinusLoc, "invalid register in register list");
4942       // Ranges must go from low to high.
4943       if (Reg > EndReg)
4944         return Error(AfterMinusLoc, "bad range in register list");
4945       // Parse the lane specifier if present.
4946       VectorLaneTy NextLaneKind;
4947       unsigned NextLaneIndex;
4948       if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4949         return ParseStatus::Failure;
4950       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4951         return Error(AfterMinusLoc, "mismatched lane index in register list");
4952 
4953       // Add all the registers in the range to the register list.
4954       Count += EndReg - Reg;
4955       Reg = EndReg;
4956       continue;
4957     }
4958     Parser.Lex(); // Eat the comma.
4959     RegLoc = Parser.getTok().getLoc();
4960     int OldReg = Reg;
4961     Reg = tryParseRegister();
4962     if (Reg == -1)
4963       return Error(RegLoc, "register expected");
4964 
4965     if (hasMVE()) {
4966       if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4967         return Error(RegLoc, "vector register in range Q0-Q7 expected");
4968       Spacing = 1;
4969     }
4970     // vector register lists must be contiguous.
4971     // It's OK to use the enumeration values directly here rather, as the
4972     // VFP register classes have the enum sorted properly.
4973     //
4974     // The list is of D registers, but we also allow Q regs and just interpret
4975     // them as the two D sub-registers.
4976     else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4977       if (!Spacing)
4978         Spacing = 1; // Register range implies a single spaced list.
4979       else if (Spacing == 2)
4980         return Error(
4981             RegLoc,
4982             "invalid register in double-spaced list (must be 'D' register')");
4983       Reg = getDRegFromQReg(Reg);
4984       if (Reg != OldReg + 1)
4985         return Error(RegLoc, "non-contiguous register range");
4986       ++Reg;
4987       Count += 2;
4988       // Parse the lane specifier if present.
4989       VectorLaneTy NextLaneKind;
4990       unsigned NextLaneIndex;
4991       SMLoc LaneLoc = Parser.getTok().getLoc();
4992       if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4993         return ParseStatus::Failure;
4994       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4995         return Error(LaneLoc, "mismatched lane index in register list");
4996       continue;
4997     }
4998     // Normal D register.
4999     // Figure out the register spacing (single or double) of the list if
5000     // we don't know it already.
5001     if (!Spacing)
5002       Spacing = 1 + (Reg == OldReg + 2);
5003 
5004     // Just check that it's contiguous and keep going.
5005     if (Reg != OldReg + Spacing)
5006       return Error(RegLoc, "non-contiguous register range");
5007     ++Count;
5008     // Parse the lane specifier if present.
5009     VectorLaneTy NextLaneKind;
5010     unsigned NextLaneIndex;
5011     SMLoc EndLoc = Parser.getTok().getLoc();
5012     if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
5013       return ParseStatus::Failure;
5014     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
5015       return Error(EndLoc, "mismatched lane index in register list");
5016   }
5017 
5018   if (Parser.getTok().isNot(AsmToken::RCurly))
5019     return Error(Parser.getTok().getLoc(), "'}' expected");
5020   E = Parser.getTok().getEndLoc();
5021   Parser.Lex(); // Eat '}' token.
5022 
5023   switch (LaneKind) {
5024   case NoLanes:
5025   case AllLanes: {
5026     // Two-register operands have been converted to the
5027     // composite register classes.
5028     if (Count == 2 && !hasMVE()) {
5029       const MCRegisterClass *RC = (Spacing == 1) ?
5030         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
5031         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
5032       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
5033     }
5034     auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
5035                    ARMOperand::CreateVectorListAllLanes);
5036     Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E, *this));
5037     break;
5038   }
5039   case IndexedLane:
5040     Operands.push_back(ARMOperand::CreateVectorListIndexed(
5041         FirstReg, Count, LaneIndex, (Spacing == 2), S, E, *this));
5042     break;
5043   }
5044   return ParseStatus::Success;
5045 }
5046 
5047 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
parseMemBarrierOptOperand(OperandVector & Operands)5048 ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
5049   MCAsmParser &Parser = getParser();
5050   SMLoc S = Parser.getTok().getLoc();
5051   const AsmToken &Tok = Parser.getTok();
5052   unsigned Opt;
5053 
5054   if (Tok.is(AsmToken::Identifier)) {
5055     StringRef OptStr = Tok.getString();
5056 
5057     Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
5058       .Case("sy",    ARM_MB::SY)
5059       .Case("st",    ARM_MB::ST)
5060       .Case("ld",    ARM_MB::LD)
5061       .Case("sh",    ARM_MB::ISH)
5062       .Case("ish",   ARM_MB::ISH)
5063       .Case("shst",  ARM_MB::ISHST)
5064       .Case("ishst", ARM_MB::ISHST)
5065       .Case("ishld", ARM_MB::ISHLD)
5066       .Case("nsh",   ARM_MB::NSH)
5067       .Case("un",    ARM_MB::NSH)
5068       .Case("nshst", ARM_MB::NSHST)
5069       .Case("nshld", ARM_MB::NSHLD)
5070       .Case("unst",  ARM_MB::NSHST)
5071       .Case("osh",   ARM_MB::OSH)
5072       .Case("oshst", ARM_MB::OSHST)
5073       .Case("oshld", ARM_MB::OSHLD)
5074       .Default(~0U);
5075 
5076     // ishld, oshld, nshld and ld are only available from ARMv8.
5077     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
5078                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
5079       Opt = ~0U;
5080 
5081     if (Opt == ~0U)
5082       return ParseStatus::NoMatch;
5083 
5084     Parser.Lex(); // Eat identifier token.
5085   } else if (Tok.is(AsmToken::Hash) ||
5086              Tok.is(AsmToken::Dollar) ||
5087              Tok.is(AsmToken::Integer)) {
5088     if (Parser.getTok().isNot(AsmToken::Integer))
5089       Parser.Lex(); // Eat '#' or '$'.
5090     SMLoc Loc = Parser.getTok().getLoc();
5091 
5092     const MCExpr *MemBarrierID;
5093     if (getParser().parseExpression(MemBarrierID))
5094       return Error(Loc, "illegal expression");
5095 
5096     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
5097     if (!CE)
5098       return Error(Loc, "constant expression expected");
5099 
5100     int Val = CE->getValue();
5101     if (Val & ~0xf)
5102       return Error(Loc, "immediate value out of range");
5103 
5104     Opt = ARM_MB::RESERVED_0 + Val;
5105   } else
5106     return ParseStatus::Failure;
5107 
5108   Operands.push_back(
5109       ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S, *this));
5110   return ParseStatus::Success;
5111 }
5112 
5113 ParseStatus
parseTraceSyncBarrierOptOperand(OperandVector & Operands)5114 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
5115   MCAsmParser &Parser = getParser();
5116   SMLoc S = Parser.getTok().getLoc();
5117   const AsmToken &Tok = Parser.getTok();
5118 
5119   if (Tok.isNot(AsmToken::Identifier))
5120     return ParseStatus::NoMatch;
5121 
5122   if (!Tok.getString().equals_insensitive("csync"))
5123     return ParseStatus::NoMatch;
5124 
5125   Parser.Lex(); // Eat identifier token.
5126 
5127   Operands.push_back(
5128       ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S, *this));
5129   return ParseStatus::Success;
5130 }
5131 
5132 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
5133 ParseStatus
parseInstSyncBarrierOptOperand(OperandVector & Operands)5134 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
5135   MCAsmParser &Parser = getParser();
5136   SMLoc S = Parser.getTok().getLoc();
5137   const AsmToken &Tok = Parser.getTok();
5138   unsigned Opt;
5139 
5140   if (Tok.is(AsmToken::Identifier)) {
5141     StringRef OptStr = Tok.getString();
5142 
5143     if (OptStr.equals_insensitive("sy"))
5144       Opt = ARM_ISB::SY;
5145     else
5146       return ParseStatus::NoMatch;
5147 
5148     Parser.Lex(); // Eat identifier token.
5149   } else if (Tok.is(AsmToken::Hash) ||
5150              Tok.is(AsmToken::Dollar) ||
5151              Tok.is(AsmToken::Integer)) {
5152     if (Parser.getTok().isNot(AsmToken::Integer))
5153       Parser.Lex(); // Eat '#' or '$'.
5154     SMLoc Loc = Parser.getTok().getLoc();
5155 
5156     const MCExpr *ISBarrierID;
5157     if (getParser().parseExpression(ISBarrierID))
5158       return Error(Loc, "illegal expression");
5159 
5160     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5161     if (!CE)
5162       return Error(Loc, "constant expression expected");
5163 
5164     int Val = CE->getValue();
5165     if (Val & ~0xf)
5166       return Error(Loc, "immediate value out of range");
5167 
5168     Opt = ARM_ISB::RESERVED_0 + Val;
5169   } else
5170     return ParseStatus::Failure;
5171 
5172   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5173       (ARM_ISB::InstSyncBOpt)Opt, S, *this));
5174   return ParseStatus::Success;
5175 }
5176 
5177 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
parseProcIFlagsOperand(OperandVector & Operands)5178 ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5179   MCAsmParser &Parser = getParser();
5180   SMLoc S = Parser.getTok().getLoc();
5181   const AsmToken &Tok = Parser.getTok();
5182   if (!Tok.is(AsmToken::Identifier))
5183     return ParseStatus::NoMatch;
5184   StringRef IFlagsStr = Tok.getString();
5185 
5186   // An iflags string of "none" is interpreted to mean that none of the AIF
5187   // bits are set.  Not a terribly useful instruction, but a valid encoding.
5188   unsigned IFlags = 0;
5189   if (IFlagsStr != "none") {
5190         for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5191       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5192         .Case("a", ARM_PROC::A)
5193         .Case("i", ARM_PROC::I)
5194         .Case("f", ARM_PROC::F)
5195         .Default(~0U);
5196 
5197       // If some specific iflag is already set, it means that some letter is
5198       // present more than once, this is not acceptable.
5199       if (Flag == ~0U || (IFlags & Flag))
5200         return ParseStatus::NoMatch;
5201 
5202       IFlags |= Flag;
5203     }
5204   }
5205 
5206   Parser.Lex(); // Eat identifier token.
5207   Operands.push_back(
5208       ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S, *this));
5209   return ParseStatus::Success;
5210 }
5211 
5212 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
parseMSRMaskOperand(OperandVector & Operands)5213 ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
5214   // Don't parse two MSR registers in a row
5215   if (static_cast<ARMOperand &>(*Operands.back()).isMSRMask() ||
5216       static_cast<ARMOperand &>(*Operands.back()).isBankedReg())
5217     return ParseStatus::NoMatch;
5218   MCAsmParser &Parser = getParser();
5219   SMLoc S = Parser.getTok().getLoc();
5220   const AsmToken &Tok = Parser.getTok();
5221 
5222   if (Tok.is(AsmToken::Integer)) {
5223     int64_t Val = Tok.getIntVal();
5224     if (Val > 255 || Val < 0) {
5225       return ParseStatus::NoMatch;
5226     }
5227     unsigned SYSmvalue = Val & 0xFF;
5228     Parser.Lex();
5229     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *this));
5230     return ParseStatus::Success;
5231   }
5232 
5233   if (!Tok.is(AsmToken::Identifier))
5234     return ParseStatus::NoMatch;
5235   StringRef Mask = Tok.getString();
5236 
5237   if (isMClass()) {
5238     auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
5239     if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5240       return ParseStatus::NoMatch;
5241 
5242     unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5243 
5244     Parser.Lex(); // Eat identifier token.
5245     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *this));
5246     return ParseStatus::Success;
5247   }
5248 
5249   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
5250   size_t Start = 0, Next = Mask.find('_');
5251   StringRef Flags = "";
5252   std::string SpecReg = Mask.slice(Start, Next).lower();
5253   if (Next != StringRef::npos)
5254     Flags = Mask.slice(Next+1, Mask.size());
5255 
5256   // FlagsVal contains the complete mask:
5257   // 3-0: Mask
5258   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5259   unsigned FlagsVal = 0;
5260 
5261   if (SpecReg == "apsr") {
5262     FlagsVal = StringSwitch<unsigned>(Flags)
5263     .Case("nzcvq",  0x8) // same as CPSR_f
5264     .Case("g",      0x4) // same as CPSR_s
5265     .Case("nzcvqg", 0xc) // same as CPSR_fs
5266     .Default(~0U);
5267 
5268     if (FlagsVal == ~0U) {
5269       if (!Flags.empty())
5270         return ParseStatus::NoMatch;
5271       else
5272         FlagsVal = 8; // No flag
5273     }
5274   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
5275     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
5276     if (Flags == "all" || Flags == "")
5277       Flags = "fc";
5278     for (int i = 0, e = Flags.size(); i != e; ++i) {
5279       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
5280       .Case("c", 1)
5281       .Case("x", 2)
5282       .Case("s", 4)
5283       .Case("f", 8)
5284       .Default(~0U);
5285 
5286       // If some specific flag is already set, it means that some letter is
5287       // present more than once, this is not acceptable.
5288       if (Flag == ~0U || (FlagsVal & Flag))
5289         return ParseStatus::NoMatch;
5290       FlagsVal |= Flag;
5291     }
5292   } else // No match for special register.
5293     return ParseStatus::NoMatch;
5294 
5295   // Special register without flags is NOT equivalent to "fc" flags.
5296   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
5297   // two lines would enable gas compatibility at the expense of breaking
5298   // round-tripping.
5299   //
5300   // if (!FlagsVal)
5301   //  FlagsVal = 0x9;
5302 
5303   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5304   if (SpecReg == "spsr")
5305     FlagsVal |= 16;
5306 
5307   Parser.Lex(); // Eat identifier token.
5308   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S, *this));
5309   return ParseStatus::Success;
5310 }
5311 
5312 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5313 /// use in the MRS/MSR instructions added to support virtualization.
parseBankedRegOperand(OperandVector & Operands)5314 ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5315   // Don't parse two Banked registers in a row
5316   if (static_cast<ARMOperand &>(*Operands.back()).isBankedReg() ||
5317       static_cast<ARMOperand &>(*Operands.back()).isMSRMask())
5318     return ParseStatus::NoMatch;
5319   MCAsmParser &Parser = getParser();
5320   SMLoc S = Parser.getTok().getLoc();
5321   const AsmToken &Tok = Parser.getTok();
5322   if (!Tok.is(AsmToken::Identifier))
5323     return ParseStatus::NoMatch;
5324   StringRef RegName = Tok.getString();
5325 
5326   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5327   if (!TheReg)
5328     return ParseStatus::NoMatch;
5329   unsigned Encoding = TheReg->Encoding;
5330 
5331   Parser.Lex(); // Eat identifier token.
5332   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S, *this));
5333   return ParseStatus::Success;
5334 }
5335 
5336 // FIXME: Unify the different methods for handling shift operators
5337 // and use TableGen matching mechanisms to do the validation rather than
5338 // separate parsing paths.
parsePKHImm(OperandVector & Operands,ARM_AM::ShiftOpc Op,int Low,int High)5339 ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands,
5340                                       ARM_AM::ShiftOpc Op, int Low, int High) {
5341   MCAsmParser &Parser = getParser();
5342   auto ShiftCodeOpt = tryParseShiftToken();
5343 
5344   if (!ShiftCodeOpt.has_value())
5345     return ParseStatus::NoMatch;
5346   auto ShiftCode = ShiftCodeOpt.value();
5347 
5348   // The wrong shift code has been provided. Can error here as has matched the
5349   // correct operand in this case.
5350   if (ShiftCode != Op)
5351     return Error(Parser.getTok().getLoc(),
5352                  ARM_AM::getShiftOpcStr(Op) + " operand expected.");
5353 
5354   Parser.Lex(); // Eat shift type token.
5355 
5356   // There must be a '#' and a shift amount.
5357   if (Parser.getTok().isNot(AsmToken::Hash) &&
5358       Parser.getTok().isNot(AsmToken::Dollar))
5359     return ParseStatus::NoMatch;
5360   Parser.Lex(); // Eat hash token.
5361 
5362   const MCExpr *ShiftAmount;
5363   SMLoc Loc = Parser.getTok().getLoc();
5364   SMLoc EndLoc;
5365   if (getParser().parseExpression(ShiftAmount, EndLoc))
5366     return Error(Loc, "illegal expression");
5367   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5368   if (!CE)
5369     return Error(Loc, "constant expression expected");
5370   int Val = CE->getValue();
5371   if (Val < Low || Val > High)
5372     return Error(Loc, "immediate value out of range");
5373 
5374   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc, *this));
5375 
5376   return ParseStatus::Success;
5377 }
5378 
parseSetEndImm(OperandVector & Operands)5379 ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5380   MCAsmParser &Parser = getParser();
5381   const AsmToken &Tok = Parser.getTok();
5382   SMLoc S = Tok.getLoc();
5383   if (Tok.isNot(AsmToken::Identifier))
5384     return Error(S, "'be' or 'le' operand expected");
5385   int Val = StringSwitch<int>(Tok.getString().lower())
5386     .Case("be", 1)
5387     .Case("le", 0)
5388     .Default(-1);
5389   Parser.Lex(); // Eat the token.
5390 
5391   if (Val == -1)
5392     return Error(S, "'be' or 'le' operand expected");
5393   Operands.push_back(ARMOperand::CreateImm(
5394       MCConstantExpr::create(Val, getContext()), S, Tok.getEndLoc(), *this));
5395   return ParseStatus::Success;
5396 }
5397 
5398 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5399 /// instructions. Legal values are:
5400 ///     lsl #n  'n' in [0,31]
5401 ///     asr #n  'n' in [1,32]
5402 ///             n == 32 encoded as n == 0.
parseShifterImm(OperandVector & Operands)5403 ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5404   MCAsmParser &Parser = getParser();
5405   const AsmToken &Tok = Parser.getTok();
5406   SMLoc S = Tok.getLoc();
5407   if (Tok.isNot(AsmToken::Identifier))
5408     return ParseStatus::NoMatch;
5409   StringRef ShiftName = Tok.getString();
5410   bool isASR;
5411   if (ShiftName == "lsl" || ShiftName == "LSL")
5412     isASR = false;
5413   else if (ShiftName == "asr" || ShiftName == "ASR")
5414     isASR = true;
5415   else
5416     return ParseStatus::NoMatch;
5417   Parser.Lex(); // Eat the operator.
5418 
5419   // A '#' and a shift amount.
5420   if (Parser.getTok().isNot(AsmToken::Hash) &&
5421       Parser.getTok().isNot(AsmToken::Dollar))
5422     return Error(Parser.getTok().getLoc(), "'#' expected");
5423   Parser.Lex(); // Eat hash token.
5424   SMLoc ExLoc = Parser.getTok().getLoc();
5425 
5426   const MCExpr *ShiftAmount;
5427   SMLoc EndLoc;
5428   if (getParser().parseExpression(ShiftAmount, EndLoc))
5429     return Error(ExLoc, "malformed shift expression");
5430   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5431   if (!CE)
5432     return Error(ExLoc, "shift amount must be an immediate");
5433 
5434   int64_t Val = CE->getValue();
5435   if (isASR) {
5436     // Shift amount must be in [1,32]
5437     if (Val < 1 || Val > 32)
5438       return Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5439     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5440     if (isThumb() && Val == 32)
5441       return Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5442     if (Val == 32) Val = 0;
5443   } else {
5444     // Shift amount must be in [1,32]
5445     if (Val < 0 || Val > 31)
5446       return Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
5447   }
5448 
5449   Operands.push_back(
5450       ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc, *this));
5451 
5452   return ParseStatus::Success;
5453 }
5454 
5455 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5456 /// of instructions. Legal values are:
5457 ///     ror #n  'n' in {0, 8, 16, 24}
parseRotImm(OperandVector & Operands)5458 ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
5459   MCAsmParser &Parser = getParser();
5460   const AsmToken &Tok = Parser.getTok();
5461   SMLoc S = Tok.getLoc();
5462   if (Tok.isNot(AsmToken::Identifier))
5463     return ParseStatus::NoMatch;
5464   StringRef ShiftName = Tok.getString();
5465   if (ShiftName != "ror" && ShiftName != "ROR")
5466     return ParseStatus::NoMatch;
5467   Parser.Lex(); // Eat the operator.
5468 
5469   // A '#' and a rotate amount.
5470   if (Parser.getTok().isNot(AsmToken::Hash) &&
5471       Parser.getTok().isNot(AsmToken::Dollar))
5472     return Error(Parser.getTok().getLoc(), "'#' expected");
5473   Parser.Lex(); // Eat hash token.
5474   SMLoc ExLoc = Parser.getTok().getLoc();
5475 
5476   const MCExpr *ShiftAmount;
5477   SMLoc EndLoc;
5478   if (getParser().parseExpression(ShiftAmount, EndLoc))
5479     return Error(ExLoc, "malformed rotate expression");
5480   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5481   if (!CE)
5482     return Error(ExLoc, "rotate amount must be an immediate");
5483 
5484   int64_t Val = CE->getValue();
5485   // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
5486   // normally, zero is represented in asm by omitting the rotate operand
5487   // entirely.
5488   if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5489     return Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5490 
5491   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc, *this));
5492 
5493   return ParseStatus::Success;
5494 }
5495 
parseModImm(OperandVector & Operands)5496 ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
5497   MCAsmParser &Parser = getParser();
5498   MCAsmLexer &Lexer = getLexer();
5499   int64_t Imm1, Imm2;
5500 
5501   SMLoc S = Parser.getTok().getLoc();
5502 
5503   // 1) A mod_imm operand can appear in the place of a register name:
5504   //   add r0, #mod_imm
5505   //   add r0, r0, #mod_imm
5506   // to correctly handle the latter, we bail out as soon as we see an
5507   // identifier.
5508   //
5509   // 2) Similarly, we do not want to parse into complex operands:
5510   //   mov r0, #mod_imm
5511   //   mov r0, :lower16:(_foo)
5512   if (Parser.getTok().is(AsmToken::Identifier) ||
5513       Parser.getTok().is(AsmToken::Colon))
5514     return ParseStatus::NoMatch;
5515 
5516   // Hash (dollar) is optional as per the ARMARM
5517   if (Parser.getTok().is(AsmToken::Hash) ||
5518       Parser.getTok().is(AsmToken::Dollar)) {
5519     // Avoid parsing into complex operands (#:)
5520     if (Lexer.peekTok().is(AsmToken::Colon))
5521       return ParseStatus::NoMatch;
5522 
5523     // Eat the hash (dollar)
5524     Parser.Lex();
5525   }
5526 
5527   SMLoc Sx1, Ex1;
5528   Sx1 = Parser.getTok().getLoc();
5529   const MCExpr *Imm1Exp;
5530   if (getParser().parseExpression(Imm1Exp, Ex1))
5531     return Error(Sx1, "malformed expression");
5532 
5533   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5534 
5535   if (CE) {
5536     // Immediate must fit within 32-bits
5537     Imm1 = CE->getValue();
5538     int Enc = ARM_AM::getSOImmVal(Imm1);
5539     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5540       // We have a match!
5541       Operands.push_back(ARMOperand::CreateModImm(
5542           (Enc & 0xFF), (Enc & 0xF00) >> 7, Sx1, Ex1, *this));
5543       return ParseStatus::Success;
5544     }
5545 
5546     // We have parsed an immediate which is not for us, fallback to a plain
5547     // immediate. This can happen for instruction aliases. For an example,
5548     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5549     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5550     // instruction with a mod_imm operand. The alias is defined such that the
5551     // parser method is shared, that's why we have to do this here.
5552     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5553       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *this));
5554       return ParseStatus::Success;
5555     }
5556   } else {
5557     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5558     // MCFixup). Fallback to a plain immediate.
5559     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *this));
5560     return ParseStatus::Success;
5561   }
5562 
5563   // From this point onward, we expect the input to be a (#bits, #rot) pair
5564   if (Parser.getTok().isNot(AsmToken::Comma))
5565     return Error(Sx1,
5566                  "expected modified immediate operand: #[0, 255], #even[0-30]");
5567 
5568   if (Imm1 & ~0xFF)
5569     return Error(Sx1, "immediate operand must a number in the range [0, 255]");
5570 
5571   // Eat the comma
5572   Parser.Lex();
5573 
5574   // Repeat for #rot
5575   SMLoc Sx2, Ex2;
5576   Sx2 = Parser.getTok().getLoc();
5577 
5578   // Eat the optional hash (dollar)
5579   if (Parser.getTok().is(AsmToken::Hash) ||
5580       Parser.getTok().is(AsmToken::Dollar))
5581     Parser.Lex();
5582 
5583   const MCExpr *Imm2Exp;
5584   if (getParser().parseExpression(Imm2Exp, Ex2))
5585     return Error(Sx2, "malformed expression");
5586 
5587   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5588 
5589   if (CE) {
5590     Imm2 = CE->getValue();
5591     if (!(Imm2 & ~0x1E)) {
5592       // We have a match!
5593       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2, *this));
5594       return ParseStatus::Success;
5595     }
5596     return Error(Sx2,
5597                  "immediate operand must an even number in the range [0, 30]");
5598   } else {
5599     return Error(Sx2, "constant expression expected");
5600   }
5601 }
5602 
parseBitfield(OperandVector & Operands)5603 ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
5604   MCAsmParser &Parser = getParser();
5605   SMLoc S = Parser.getTok().getLoc();
5606   // The bitfield descriptor is really two operands, the LSB and the width.
5607   if (Parser.getTok().isNot(AsmToken::Hash) &&
5608       Parser.getTok().isNot(AsmToken::Dollar))
5609     return ParseStatus::NoMatch;
5610   Parser.Lex(); // Eat hash token.
5611 
5612   const MCExpr *LSBExpr;
5613   SMLoc E = Parser.getTok().getLoc();
5614   if (getParser().parseExpression(LSBExpr))
5615     return Error(E, "malformed immediate expression");
5616   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
5617   if (!CE)
5618     return Error(E, "'lsb' operand must be an immediate");
5619 
5620   int64_t LSB = CE->getValue();
5621   // The LSB must be in the range [0,31]
5622   if (LSB < 0 || LSB > 31)
5623     return Error(E, "'lsb' operand must be in the range [0,31]");
5624   E = Parser.getTok().getLoc();
5625 
5626   // Expect another immediate operand.
5627   if (Parser.getTok().isNot(AsmToken::Comma))
5628     return Error(Parser.getTok().getLoc(), "too few operands");
5629   Parser.Lex(); // Eat hash token.
5630   if (Parser.getTok().isNot(AsmToken::Hash) &&
5631       Parser.getTok().isNot(AsmToken::Dollar))
5632     return Error(Parser.getTok().getLoc(), "'#' expected");
5633   Parser.Lex(); // Eat hash token.
5634 
5635   const MCExpr *WidthExpr;
5636   SMLoc EndLoc;
5637   if (getParser().parseExpression(WidthExpr, EndLoc))
5638     return Error(E, "malformed immediate expression");
5639   CE = dyn_cast<MCConstantExpr>(WidthExpr);
5640   if (!CE)
5641     return Error(E, "'width' operand must be an immediate");
5642 
5643   int64_t Width = CE->getValue();
5644   // The LSB must be in the range [1,32-lsb]
5645   if (Width < 1 || Width > 32 - LSB)
5646     return Error(E, "'width' operand must be in the range [1,32-lsb]");
5647 
5648   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc, *this));
5649 
5650   return ParseStatus::Success;
5651 }
5652 
parsePostIdxReg(OperandVector & Operands)5653 ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5654   // Check for a post-index addressing register operand. Specifically:
5655   // postidx_reg := '+' register {, shift}
5656   //              | '-' register {, shift}
5657   //              | register {, shift}
5658 
5659   // This method must return ParseStatus::NoMatch without consuming any tokens
5660   // in the case where there is no match, as other alternatives take other
5661   // parse methods.
5662   MCAsmParser &Parser = getParser();
5663   AsmToken Tok = Parser.getTok();
5664   SMLoc S = Tok.getLoc();
5665   bool haveEaten = false;
5666   bool isAdd = true;
5667   if (Tok.is(AsmToken::Plus)) {
5668     Parser.Lex(); // Eat the '+' token.
5669     haveEaten = true;
5670   } else if (Tok.is(AsmToken::Minus)) {
5671     Parser.Lex(); // Eat the '-' token.
5672     isAdd = false;
5673     haveEaten = true;
5674   }
5675 
5676   SMLoc E = Parser.getTok().getEndLoc();
5677   int Reg = tryParseRegister();
5678   if (Reg == -1) {
5679     if (!haveEaten)
5680       return ParseStatus::NoMatch;
5681     return Error(Parser.getTok().getLoc(), "register expected");
5682   }
5683 
5684   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5685   unsigned ShiftImm = 0;
5686   if (Parser.getTok().is(AsmToken::Comma)) {
5687     Parser.Lex(); // Eat the ','.
5688     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5689       return ParseStatus::Failure;
5690 
5691     // FIXME: Only approximates end...may include intervening whitespace.
5692     E = Parser.getTok().getLoc();
5693   }
5694 
5695   Operands.push_back(
5696       ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, ShiftImm, S, E, *this));
5697 
5698   return ParseStatus::Success;
5699 }
5700 
parseAM3Offset(OperandVector & Operands)5701 ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5702   // Check for a post-index addressing register operand. Specifically:
5703   // am3offset := '+' register
5704   //              | '-' register
5705   //              | register
5706   //              | # imm
5707   //              | # + imm
5708   //              | # - imm
5709 
5710   // This method must return ParseStatus::NoMatch without consuming any tokens
5711   // in the case where there is no match, as other alternatives take other
5712   // parse methods.
5713   MCAsmParser &Parser = getParser();
5714   AsmToken Tok = Parser.getTok();
5715   SMLoc S = Tok.getLoc();
5716 
5717   // Do immediates first, as we always parse those if we have a '#'.
5718   if (Parser.getTok().is(AsmToken::Hash) ||
5719       Parser.getTok().is(AsmToken::Dollar)) {
5720     Parser.Lex(); // Eat '#' or '$'.
5721     // Explicitly look for a '-', as we need to encode negative zero
5722     // differently.
5723     bool isNegative = Parser.getTok().is(AsmToken::Minus);
5724     const MCExpr *Offset;
5725     SMLoc E;
5726     if (getParser().parseExpression(Offset, E))
5727       return ParseStatus::Failure;
5728     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5729     if (!CE)
5730       return Error(S, "constant expression expected");
5731     // Negative zero is encoded as the flag value
5732     // std::numeric_limits<int32_t>::min().
5733     int32_t Val = CE->getValue();
5734     if (isNegative && Val == 0)
5735       Val = std::numeric_limits<int32_t>::min();
5736 
5737     Operands.push_back(ARMOperand::CreateImm(
5738         MCConstantExpr::create(Val, getContext()), S, E, *this));
5739 
5740     return ParseStatus::Success;
5741   }
5742 
5743   bool haveEaten = false;
5744   bool isAdd = true;
5745   if (Tok.is(AsmToken::Plus)) {
5746     Parser.Lex(); // Eat the '+' token.
5747     haveEaten = true;
5748   } else if (Tok.is(AsmToken::Minus)) {
5749     Parser.Lex(); // Eat the '-' token.
5750     isAdd = false;
5751     haveEaten = true;
5752   }
5753 
5754   Tok = Parser.getTok();
5755   int Reg = tryParseRegister();
5756   if (Reg == -1) {
5757     if (!haveEaten)
5758       return ParseStatus::NoMatch;
5759     return Error(Tok.getLoc(), "register expected");
5760   }
5761 
5762   Operands.push_back(ARMOperand::CreatePostIdxReg(
5763       Reg, isAdd, ARM_AM::no_shift, 0, S, Tok.getEndLoc(), *this));
5764 
5765   return ParseStatus::Success;
5766 }
5767 
5768 // Finds the index of the first CondCode operator, if there is none returns 0
findCondCodeInd(const OperandVector & Operands,unsigned MnemonicOpsEndInd)5769 unsigned findCondCodeInd(const OperandVector &Operands,
5770                          unsigned MnemonicOpsEndInd) {
5771   for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
5772     auto Op = static_cast<ARMOperand &>(*Operands[I]);
5773     if (Op.isCondCode())
5774       return I;
5775   }
5776   return 0;
5777 }
5778 
findCCOutInd(const OperandVector & Operands,unsigned MnemonicOpsEndInd)5779 unsigned findCCOutInd(const OperandVector &Operands,
5780                       unsigned MnemonicOpsEndInd) {
5781   for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
5782     auto Op = static_cast<ARMOperand &>(*Operands[I]);
5783     if (Op.isCCOut())
5784       return I;
5785   }
5786   return 0;
5787 }
5788 
5789 /// Convert parsed operands to MCInst.  Needed here because this instruction
5790 /// only has two register operands, but multiplication is commutative so
5791 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
cvtThumbMultiply(MCInst & Inst,const OperandVector & Operands)5792 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5793                                     const OperandVector &Operands) {
5794   unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
5795   unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
5796   unsigned CondOutI = findCCOutInd(Operands, MnemonicOpsEndInd);
5797 
5798   // 2 operand form
5799   unsigned RegRd = MnemonicOpsEndInd;
5800   unsigned RegRn = MnemonicOpsEndInd + 1;
5801   unsigned RegRm = MnemonicOpsEndInd;
5802 
5803   if (Operands.size() == MnemonicOpsEndInd + 3) {
5804     // If we have a three-operand form, make sure to set Rn to be the operand
5805     // that isn't the same as Rd.
5806     if (((ARMOperand &)*Operands[RegRd]).getReg() ==
5807         ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1]).getReg()) {
5808       RegRn = MnemonicOpsEndInd + 2;
5809       RegRm = MnemonicOpsEndInd + 1;
5810     } else {
5811       RegRn = MnemonicOpsEndInd + 1;
5812       RegRm = MnemonicOpsEndInd + 2;
5813     }
5814   }
5815 
5816   // Rd
5817   ((ARMOperand &)*Operands[RegRd]).addRegOperands(Inst, 1);
5818   // CCOut
5819   if (CondOutI != 0) {
5820     ((ARMOperand &)*Operands[CondOutI]).addCCOutOperands(Inst, 1);
5821   } else {
5822     ARMOperand Op =
5823         *ARMOperand::CreateCCOut(0, Operands[0]->getEndLoc(), *this);
5824     Op.addCCOutOperands(Inst, 1);
5825   }
5826   // Rn
5827   ((ARMOperand &)*Operands[RegRn]).addRegOperands(Inst, 1);
5828   // Rm
5829   ((ARMOperand &)*Operands[RegRm]).addRegOperands(Inst, 1);
5830 
5831   // Cond code
5832   if (CondI != 0) {
5833     ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
5834   } else {
5835     ARMOperand Op = *ARMOperand::CreateCondCode(
5836         llvm::ARMCC::AL, Operands[0]->getEndLoc(), *this);
5837     Op.addCondCodeOperands(Inst, 2);
5838   }
5839 }
5840 
cvtThumbBranches(MCInst & Inst,const OperandVector & Operands)5841 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5842                                     const OperandVector &Operands) {
5843   unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
5844   unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
5845   unsigned Cond =
5846       (CondI == 0 ? ARMCC::AL
5847                   : static_cast<ARMOperand &>(*Operands[CondI]).getCondCode());
5848 
5849   // first decide whether or not the branch should be conditional
5850   // by looking at it's location relative to an IT block
5851   if(inITBlock()) {
5852     // inside an IT block we cannot have any conditional branches. any
5853     // such instructions needs to be converted to unconditional form
5854     switch(Inst.getOpcode()) {
5855       case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
5856       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
5857     }
5858   } else {
5859     switch(Inst.getOpcode()) {
5860       case ARM::tB:
5861       case ARM::tBcc:
5862         Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
5863         break;
5864       case ARM::t2B:
5865       case ARM::t2Bcc:
5866         Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
5867         break;
5868     }
5869   }
5870 
5871   // now decide on encoding size based on branch target range
5872   switch(Inst.getOpcode()) {
5873     // classify tB as either t2B or t1B based on range of immediate operand
5874     case ARM::tB: {
5875       ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
5876       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
5877         Inst.setOpcode(ARM::t2B);
5878       break;
5879     }
5880     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
5881     case ARM::tBcc: {
5882       ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
5883       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
5884         Inst.setOpcode(ARM::t2Bcc);
5885       break;
5886     }
5887   }
5888   ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addImmOperands(Inst, 1);
5889   if (CondI != 0) {
5890     ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
5891   } else {
5892     ARMOperand Op = *ARMOperand::CreateCondCode(
5893         llvm::ARMCC::AL, Operands[0]->getEndLoc(), *this);
5894     Op.addCondCodeOperands(Inst, 2);
5895   }
5896 }
5897 
cvtMVEVMOVQtoDReg(MCInst & Inst,const OperandVector & Operands)5898 void ARMAsmParser::cvtMVEVMOVQtoDReg(
5899   MCInst &Inst, const OperandVector &Operands) {
5900 
5901   unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
5902   unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
5903 
5904   // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
5905   assert(Operands.size() == MnemonicOpsEndInd + 6);
5906 
5907   ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addRegOperands(Inst, 1); // Rt
5908   ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1])
5909       .addRegOperands(Inst, 1); // Rt2
5910   ((ARMOperand &)*Operands[MnemonicOpsEndInd + 2])
5911       .addRegOperands(Inst, 1); // Qd
5912   ((ARMOperand &)*Operands[MnemonicOpsEndInd + 3])
5913       .addMVEPairVectorIndexOperands(Inst, 1); // idx
5914   // skip second copy of Qd in Operands[6]
5915   ((ARMOperand &)*Operands[MnemonicOpsEndInd + 5])
5916       .addMVEPairVectorIndexOperands(Inst, 1); // idx2
5917   if (CondI != 0) {
5918     ((ARMOperand &)*Operands[CondI])
5919         .addCondCodeOperands(Inst, 2); // condition code
5920   } else {
5921     ARMOperand Op =
5922         *ARMOperand::CreateCondCode(ARMCC::AL, Operands[0]->getEndLoc(), *this);
5923     Op.addCondCodeOperands(Inst, 2);
5924   }
5925 }
5926 
5927 /// Parse an ARM memory expression, return false if successful else return true
5928 /// or an error.  The first token must be a '[' when called.
parseMemory(OperandVector & Operands)5929 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5930   MCAsmParser &Parser = getParser();
5931   SMLoc S, E;
5932   if (Parser.getTok().isNot(AsmToken::LBrac))
5933     return TokError("Token is not a Left Bracket");
5934   S = Parser.getTok().getLoc();
5935   Parser.Lex(); // Eat left bracket token.
5936 
5937   const AsmToken &BaseRegTok = Parser.getTok();
5938   int BaseRegNum = tryParseRegister();
5939   if (BaseRegNum == -1)
5940     return Error(BaseRegTok.getLoc(), "register expected");
5941 
5942   // The next token must either be a comma, a colon or a closing bracket.
5943   const AsmToken &Tok = Parser.getTok();
5944   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5945       !Tok.is(AsmToken::RBrac))
5946     return Error(Tok.getLoc(), "malformed memory operand");
5947 
5948   if (Tok.is(AsmToken::RBrac)) {
5949     E = Tok.getEndLoc();
5950     Parser.Lex(); // Eat right bracket token.
5951 
5952     Operands.push_back(ARMOperand::CreateMem(
5953         BaseRegNum, nullptr, 0, ARM_AM::no_shift, 0, 0, false, S, E, *this));
5954 
5955     // If there's a pre-indexing writeback marker, '!', just add it as a token
5956     // operand. It's rather odd, but syntactically valid.
5957     if (Parser.getTok().is(AsmToken::Exclaim)) {
5958       Operands.push_back(
5959           ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
5960       Parser.Lex(); // Eat the '!'.
5961     }
5962 
5963     return false;
5964   }
5965 
5966   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5967          "Lost colon or comma in memory operand?!");
5968   if (Tok.is(AsmToken::Comma)) {
5969     Parser.Lex(); // Eat the comma.
5970   }
5971 
5972   // If we have a ':', it's an alignment specifier.
5973   if (Parser.getTok().is(AsmToken::Colon)) {
5974     Parser.Lex(); // Eat the ':'.
5975     E = Parser.getTok().getLoc();
5976     SMLoc AlignmentLoc = Tok.getLoc();
5977 
5978     const MCExpr *Expr;
5979     if (getParser().parseExpression(Expr))
5980      return true;
5981 
5982     // The expression has to be a constant. Memory references with relocations
5983     // don't come through here, as they use the <label> forms of the relevant
5984     // instructions.
5985     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5986     if (!CE)
5987       return Error (E, "constant expression expected");
5988 
5989     unsigned Align = 0;
5990     switch (CE->getValue()) {
5991     default:
5992       return Error(E,
5993                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5994     case 16:  Align = 2; break;
5995     case 32:  Align = 4; break;
5996     case 64:  Align = 8; break;
5997     case 128: Align = 16; break;
5998     case 256: Align = 32; break;
5999     }
6000 
6001     // Now we should have the closing ']'
6002     if (Parser.getTok().isNot(AsmToken::RBrac))
6003       return Error(Parser.getTok().getLoc(), "']' expected");
6004     E = Parser.getTok().getEndLoc();
6005     Parser.Lex(); // Eat right bracket token.
6006 
6007     // Don't worry about range checking the value here. That's handled by
6008     // the is*() predicates.
6009     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
6010                                              ARM_AM::no_shift, 0, Align, false,
6011                                              S, E, *this, AlignmentLoc));
6012 
6013     // If there's a pre-indexing writeback marker, '!', just add it as a token
6014     // operand.
6015     if (Parser.getTok().is(AsmToken::Exclaim)) {
6016       Operands.push_back(
6017           ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
6018       Parser.Lex(); // Eat the '!'.
6019     }
6020 
6021     return false;
6022   }
6023 
6024   // If we have a '#' or '$', it's an immediate offset, else assume it's a
6025   // register offset. Be friendly and also accept a plain integer or expression
6026   // (without a leading hash) for gas compatibility.
6027   if (Parser.getTok().is(AsmToken::Hash) ||
6028       Parser.getTok().is(AsmToken::Dollar) ||
6029       Parser.getTok().is(AsmToken::LParen) ||
6030       Parser.getTok().is(AsmToken::Integer)) {
6031     if (Parser.getTok().is(AsmToken::Hash) ||
6032         Parser.getTok().is(AsmToken::Dollar))
6033       Parser.Lex(); // Eat '#' or '$'
6034     E = Parser.getTok().getLoc();
6035 
6036     bool isNegative = getParser().getTok().is(AsmToken::Minus);
6037     const MCExpr *Offset, *AdjustedOffset;
6038     if (getParser().parseExpression(Offset))
6039      return true;
6040 
6041     if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
6042       // If the constant was #-0, represent it as
6043       // std::numeric_limits<int32_t>::min().
6044       int32_t Val = CE->getValue();
6045       if (isNegative && Val == 0)
6046         CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
6047                                     getContext());
6048       // Don't worry about range checking the value here. That's handled by
6049       // the is*() predicates.
6050       AdjustedOffset = CE;
6051     } else
6052       AdjustedOffset = Offset;
6053     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, AdjustedOffset, 0,
6054                                              ARM_AM::no_shift, 0, 0, false, S,
6055                                              E, *this));
6056 
6057     // Now we should have the closing ']'
6058     if (Parser.getTok().isNot(AsmToken::RBrac))
6059       return Error(Parser.getTok().getLoc(), "']' expected");
6060     E = Parser.getTok().getEndLoc();
6061     Parser.Lex(); // Eat right bracket token.
6062 
6063     // If there's a pre-indexing writeback marker, '!', just add it as a token
6064     // operand.
6065     if (Parser.getTok().is(AsmToken::Exclaim)) {
6066       Operands.push_back(
6067           ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
6068       Parser.Lex(); // Eat the '!'.
6069     }
6070 
6071     return false;
6072   }
6073 
6074   // The register offset is optionally preceded by a '+' or '-'
6075   bool isNegative = false;
6076   if (Parser.getTok().is(AsmToken::Minus)) {
6077     isNegative = true;
6078     Parser.Lex(); // Eat the '-'.
6079   } else if (Parser.getTok().is(AsmToken::Plus)) {
6080     // Nothing to do.
6081     Parser.Lex(); // Eat the '+'.
6082   }
6083 
6084   E = Parser.getTok().getLoc();
6085   int OffsetRegNum = tryParseRegister();
6086   if (OffsetRegNum == -1)
6087     return Error(E, "register expected");
6088 
6089   // If there's a shift operator, handle it.
6090   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
6091   unsigned ShiftImm = 0;
6092   if (Parser.getTok().is(AsmToken::Comma)) {
6093     Parser.Lex(); // Eat the ','.
6094     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
6095       return true;
6096   }
6097 
6098   // Now we should have the closing ']'
6099   if (Parser.getTok().isNot(AsmToken::RBrac))
6100     return Error(Parser.getTok().getLoc(), "']' expected");
6101   E = Parser.getTok().getEndLoc();
6102   Parser.Lex(); // Eat right bracket token.
6103 
6104   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
6105                                            ShiftType, ShiftImm, 0, isNegative,
6106                                            S, E, *this));
6107 
6108   // If there's a pre-indexing writeback marker, '!', just add it as a token
6109   // operand.
6110   if (Parser.getTok().is(AsmToken::Exclaim)) {
6111     Operands.push_back(
6112         ARMOperand::CreateToken("!", Parser.getTok().getLoc(), *this));
6113     Parser.Lex(); // Eat the '!'.
6114   }
6115 
6116   return false;
6117 }
6118 
6119 /// parseMemRegOffsetShift - one of these two:
6120 ///   ( lsl | lsr | asr | ror ) , # shift_amount
6121 ///   rrx
6122 /// return true if it parses a shift otherwise it returns false.
parseMemRegOffsetShift(ARM_AM::ShiftOpc & St,unsigned & Amount)6123 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
6124                                           unsigned &Amount) {
6125   MCAsmParser &Parser = getParser();
6126   SMLoc Loc = Parser.getTok().getLoc();
6127   const AsmToken &Tok = Parser.getTok();
6128   if (Tok.isNot(AsmToken::Identifier))
6129     return Error(Loc, "illegal shift operator");
6130   StringRef ShiftName = Tok.getString();
6131   if (ShiftName == "lsl" || ShiftName == "LSL" ||
6132       ShiftName == "asl" || ShiftName == "ASL")
6133     St = ARM_AM::lsl;
6134   else if (ShiftName == "lsr" || ShiftName == "LSR")
6135     St = ARM_AM::lsr;
6136   else if (ShiftName == "asr" || ShiftName == "ASR")
6137     St = ARM_AM::asr;
6138   else if (ShiftName == "ror" || ShiftName == "ROR")
6139     St = ARM_AM::ror;
6140   else if (ShiftName == "rrx" || ShiftName == "RRX")
6141     St = ARM_AM::rrx;
6142   else if (ShiftName == "uxtw" || ShiftName == "UXTW")
6143     St = ARM_AM::uxtw;
6144   else
6145     return Error(Loc, "illegal shift operator");
6146   Parser.Lex(); // Eat shift type token.
6147 
6148   // rrx stands alone.
6149   Amount = 0;
6150   if (St != ARM_AM::rrx) {
6151     Loc = Parser.getTok().getLoc();
6152     // A '#' and a shift amount.
6153     const AsmToken &HashTok = Parser.getTok();
6154     if (HashTok.isNot(AsmToken::Hash) &&
6155         HashTok.isNot(AsmToken::Dollar))
6156       return Error(HashTok.getLoc(), "'#' expected");
6157     Parser.Lex(); // Eat hash token.
6158 
6159     const MCExpr *Expr;
6160     if (getParser().parseExpression(Expr))
6161       return true;
6162     // Range check the immediate.
6163     // lsl, ror: 0 <= imm <= 31
6164     // lsr, asr: 0 <= imm <= 32
6165     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
6166     if (!CE)
6167       return Error(Loc, "shift amount must be an immediate");
6168     int64_t Imm = CE->getValue();
6169     if (Imm < 0 ||
6170         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
6171         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
6172       return Error(Loc, "immediate shift value out of range");
6173     // If <ShiftTy> #0, turn it into a no_shift.
6174     if (Imm == 0)
6175       St = ARM_AM::lsl;
6176     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
6177     if (Imm == 32)
6178       Imm = 0;
6179     Amount = Imm;
6180   }
6181 
6182   return false;
6183 }
6184 
6185 /// parseFPImm - A floating point immediate expression operand.
parseFPImm(OperandVector & Operands)6186 ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
6187   LLVM_DEBUG(dbgs() << "PARSE FPImm, Ops: " << Operands.size());
6188 
6189   MCAsmParser &Parser = getParser();
6190   // Anything that can accept a floating point constant as an operand
6191   // needs to go through here, as the regular parseExpression is
6192   // integer only.
6193   //
6194   // This routine still creates a generic Immediate operand, containing
6195   // a bitcast of the 64-bit floating point value. The various operands
6196   // that accept floats can check whether the value is valid for them
6197   // via the standard is*() predicates.
6198 
6199   SMLoc S = Parser.getTok().getLoc();
6200 
6201   if (Parser.getTok().isNot(AsmToken::Hash) &&
6202       Parser.getTok().isNot(AsmToken::Dollar))
6203     return ParseStatus::NoMatch;
6204 
6205   // Disambiguate the VMOV forms that can accept an FP immediate.
6206   // vmov.f32 <sreg>, #imm
6207   // vmov.f64 <dreg>, #imm
6208   // vmov.f32 <dreg>, #imm  @ vector f32x2
6209   // vmov.f32 <qreg>, #imm  @ vector f32x4
6210   //
6211   // There are also the NEON VMOV instructions which expect an
6212   // integer constant. Make sure we don't try to parse an FPImm
6213   // for these:
6214   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
6215 
6216   bool isVmovf = false;
6217   unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
6218   for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
6219     ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[I]);
6220     if (TyOp.isToken() &&
6221         (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
6222          TyOp.getToken() == ".f16")) {
6223       isVmovf = true;
6224       break;
6225     }
6226   }
6227 
6228   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
6229   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
6230                                          Mnemonic.getToken() == "fconsts");
6231   if (!(isVmovf || isFconst))
6232     return ParseStatus::NoMatch;
6233 
6234   Parser.Lex(); // Eat '#' or '$'.
6235 
6236   // Handle negation, as that still comes through as a separate token.
6237   bool isNegative = false;
6238   if (Parser.getTok().is(AsmToken::Minus)) {
6239     isNegative = true;
6240     Parser.Lex();
6241   }
6242   const AsmToken &Tok = Parser.getTok();
6243   SMLoc Loc = Tok.getLoc();
6244   if (Tok.is(AsmToken::Real) && isVmovf) {
6245     APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
6246     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
6247     // If we had a '-' in front, toggle the sign bit.
6248     IntVal ^= (uint64_t)isNegative << 31;
6249     Parser.Lex(); // Eat the token.
6250     Operands.push_back(
6251         ARMOperand::CreateImm(MCConstantExpr::create(IntVal, getContext()), S,
6252                               Parser.getTok().getLoc(), *this));
6253     return ParseStatus::Success;
6254   }
6255   // Also handle plain integers. Instructions which allow floating point
6256   // immediates also allow a raw encoded 8-bit value.
6257   if (Tok.is(AsmToken::Integer) && isFconst) {
6258     int64_t Val = Tok.getIntVal();
6259     Parser.Lex(); // Eat the token.
6260     if (Val > 255 || Val < 0)
6261       return Error(Loc, "encoded floating point value out of range");
6262     float RealVal = ARM_AM::getFPImmFloat(Val);
6263     Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
6264 
6265     Operands.push_back(
6266         ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S,
6267                               Parser.getTok().getLoc(), *this));
6268     return ParseStatus::Success;
6269   }
6270 
6271   return Error(Loc, "invalid floating point immediate");
6272 }
6273 
6274 /// Parse a arm instruction operand.  For now this parses the operand regardless
6275 /// of the mnemonic.
parseOperand(OperandVector & Operands,StringRef Mnemonic)6276 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
6277   MCAsmParser &Parser = getParser();
6278   SMLoc S, E;
6279 
6280   // Check if the current operand has a custom associated parser, if so, try to
6281   // custom parse the operand, or fallback to the general approach.
6282   ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
6283   if (ResTy.isSuccess())
6284     return false;
6285   // If there wasn't a custom match, try the generic matcher below. Otherwise,
6286   // there was a match, but an error occurred, in which case, just return that
6287   // the operand parsing failed.
6288   if (ResTy.isFailure())
6289     return true;
6290 
6291   switch (getLexer().getKind()) {
6292   default:
6293     Error(Parser.getTok().getLoc(), "unexpected token in operand");
6294     return true;
6295   case AsmToken::Identifier: {
6296     // If we've seen a branch mnemonic, the next operand must be a label.  This
6297     // is true even if the label is a register name.  So "br r1" means branch to
6298     // label "r1".
6299     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
6300     if (!ExpectLabel) {
6301       if (!tryParseRegisterWithWriteBack(Operands))
6302         return false;
6303       int Res = tryParseShiftRegister(Operands);
6304       if (Res == 0) // success
6305         return false;
6306       else if (Res == -1) // irrecoverable error
6307         return true;
6308       // If this is VMRS, check for the apsr_nzcv operand.
6309       if (Mnemonic == "vmrs" &&
6310           Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
6311         S = Parser.getTok().getLoc();
6312         Parser.Lex();
6313         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S, *this));
6314         return false;
6315       }
6316     }
6317 
6318     // Fall though for the Identifier case that is not a register or a
6319     // special name.
6320     [[fallthrough]];
6321   }
6322   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
6323   case AsmToken::Integer: // things like 1f and 2b as a branch targets
6324   case AsmToken::String:  // quoted label names.
6325   case AsmToken::Dot: {   // . as a branch target
6326     // This was not a register so parse other operands that start with an
6327     // identifier (like labels) as expressions and create them as immediates.
6328     const MCExpr *IdVal;
6329     S = Parser.getTok().getLoc();
6330     if (getParser().parseExpression(IdVal))
6331       return true;
6332     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6333     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E, *this));
6334     return false;
6335   }
6336   case AsmToken::LBrac:
6337     return parseMemory(Operands);
6338   case AsmToken::LCurly: {
6339     bool AllowOutOfBoundReg = Mnemonic == "vlldm" || Mnemonic == "vlstm";
6340     return parseRegisterList(Operands, !Mnemonic.starts_with("clr"), false,
6341                              AllowOutOfBoundReg);
6342   }
6343   case AsmToken::Dollar:
6344   case AsmToken::Hash: {
6345     // #42 -> immediate
6346     // $ 42 -> immediate
6347     // $foo -> symbol name
6348     // $42 -> symbol name
6349     S = Parser.getTok().getLoc();
6350 
6351     // Favor the interpretation of $-prefixed operands as symbol names.
6352     // Cases where immediates are explicitly expected are handled by their
6353     // specific ParseMethod implementations.
6354     auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
6355     bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
6356                             (AdjacentToken.is(AsmToken::Identifier) ||
6357                              AdjacentToken.is(AsmToken::Integer));
6358     if (!ExpectIdentifier) {
6359       // Token is not part of identifier. Drop leading $ or # before parsing
6360       // expression.
6361       Parser.Lex();
6362     }
6363 
6364     if (Parser.getTok().isNot(AsmToken::Colon)) {
6365       bool IsNegative = Parser.getTok().is(AsmToken::Minus);
6366       const MCExpr *ImmVal;
6367       if (getParser().parseExpression(ImmVal))
6368         return true;
6369       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
6370       if (CE) {
6371         int32_t Val = CE->getValue();
6372         if (IsNegative && Val == 0)
6373           ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
6374                                           getContext());
6375       }
6376       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6377       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E, *this));
6378 
6379       // There can be a trailing '!' on operands that we want as a separate
6380       // '!' Token operand. Handle that here. For example, the compatibility
6381       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
6382       if (Parser.getTok().is(AsmToken::Exclaim)) {
6383         Operands.push_back(ARMOperand::CreateToken(
6384             Parser.getTok().getString(), Parser.getTok().getLoc(), *this));
6385         Parser.Lex(); // Eat exclaim token
6386       }
6387       return false;
6388     }
6389     // w/ a ':' after the '#', it's just like a plain ':'.
6390     [[fallthrough]];
6391   }
6392   case AsmToken::Colon: {
6393     S = Parser.getTok().getLoc();
6394     // ":lower16:", ":upper16:", ":lower0_7:", ":lower8_15:", ":upper0_7:" and
6395     // ":upper8_15:", expression prefixes
6396     // FIXME: Check it's an expression prefix,
6397     // e.g. (FOO - :lower16:BAR) isn't legal.
6398     ARMMCExpr::VariantKind RefKind;
6399     if (parsePrefix(RefKind))
6400       return true;
6401 
6402     const MCExpr *SubExprVal;
6403     if (getParser().parseExpression(SubExprVal))
6404       return true;
6405 
6406     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
6407                                               getContext());
6408     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6409     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E, *this));
6410     return false;
6411   }
6412   case AsmToken::Equal: {
6413     S = Parser.getTok().getLoc();
6414     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
6415       return Error(S, "unexpected token in operand");
6416     Parser.Lex(); // Eat '='
6417     const MCExpr *SubExprVal;
6418     if (getParser().parseExpression(SubExprVal))
6419       return true;
6420     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6421 
6422     // execute-only: we assume that assembly programmers know what they are
6423     // doing and allow literal pool creation here
6424     Operands.push_back(
6425         ARMOperand::CreateConstantPoolImm(SubExprVal, S, E, *this));
6426     return false;
6427   }
6428   }
6429 }
6430 
parseImmExpr(int64_t & Out)6431 bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6432   const MCExpr *Expr = nullptr;
6433   SMLoc L = getParser().getTok().getLoc();
6434   if (check(getParser().parseExpression(Expr), L, "expected expression"))
6435     return true;
6436   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6437   if (check(!Value, L, "expected constant expression"))
6438     return true;
6439   Out = Value->getValue();
6440   return false;
6441 }
6442 
6443 // parsePrefix - Parse ARM 16-bit relocations expression prefixes, i.e.
6444 // :lower16: and :upper16: and Thumb 8-bit relocation expression prefixes, i.e.
6445 // :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7:
parsePrefix(ARMMCExpr::VariantKind & RefKind)6446 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
6447   MCAsmParser &Parser = getParser();
6448   RefKind = ARMMCExpr::VK_ARM_None;
6449 
6450   // consume an optional '#' (GNU compatibility)
6451   if (getLexer().is(AsmToken::Hash))
6452     Parser.Lex();
6453 
6454   assert(getLexer().is(AsmToken::Colon) && "expected a :");
6455   Parser.Lex(); // Eat ':'
6456 
6457   if (getLexer().isNot(AsmToken::Identifier)) {
6458     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
6459     return true;
6460   }
6461 
6462   enum {
6463     COFF = (1 << MCContext::IsCOFF),
6464     ELF = (1 << MCContext::IsELF),
6465     MACHO = (1 << MCContext::IsMachO),
6466     WASM = (1 << MCContext::IsWasm),
6467   };
6468   static const struct PrefixEntry {
6469     const char *Spelling;
6470     ARMMCExpr::VariantKind VariantKind;
6471     uint8_t SupportedFormats;
6472   } PrefixEntries[] = {
6473       {"upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO},
6474       {"lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO},
6475       {"upper8_15", ARMMCExpr::VK_ARM_HI_8_15, ELF},
6476       {"upper0_7", ARMMCExpr::VK_ARM_HI_0_7, ELF},
6477       {"lower8_15", ARMMCExpr::VK_ARM_LO_8_15, ELF},
6478       {"lower0_7", ARMMCExpr::VK_ARM_LO_0_7, ELF},
6479   };
6480 
6481   StringRef IDVal = Parser.getTok().getIdentifier();
6482 
6483   const auto &Prefix =
6484       llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
6485         return PE.Spelling == IDVal;
6486       });
6487   if (Prefix == std::end(PrefixEntries)) {
6488     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
6489     return true;
6490   }
6491 
6492   uint8_t CurrentFormat;
6493   switch (getContext().getObjectFileType()) {
6494   case MCContext::IsMachO:
6495     CurrentFormat = MACHO;
6496     break;
6497   case MCContext::IsELF:
6498     CurrentFormat = ELF;
6499     break;
6500   case MCContext::IsCOFF:
6501     CurrentFormat = COFF;
6502     break;
6503   case MCContext::IsWasm:
6504     CurrentFormat = WASM;
6505     break;
6506   case MCContext::IsGOFF:
6507   case MCContext::IsSPIRV:
6508   case MCContext::IsXCOFF:
6509   case MCContext::IsDXContainer:
6510     llvm_unreachable("unexpected object format");
6511     break;
6512   }
6513 
6514   if (~Prefix->SupportedFormats & CurrentFormat) {
6515     Error(Parser.getTok().getLoc(),
6516           "cannot represent relocation in the current file format");
6517     return true;
6518   }
6519 
6520   RefKind = Prefix->VariantKind;
6521   Parser.Lex();
6522 
6523   if (getLexer().isNot(AsmToken::Colon)) {
6524     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
6525     return true;
6526   }
6527   Parser.Lex(); // Eat the last ':'
6528 
6529   // consume an optional trailing '#' (GNU compatibility) bla
6530   parseOptionalToken(AsmToken::Hash);
6531 
6532   return false;
6533 }
6534 
6535 /// Given a mnemonic, split out possible predication code and carry
6536 /// setting letters to form a canonical mnemonic and flags.
6537 //
6538 // FIXME: Would be nice to autogen this.
6539 // FIXME: This is a bit of a maze of special cases.
splitMnemonic(StringRef Mnemonic,StringRef ExtraToken,ARMCC::CondCodes & PredicationCode,ARMVCC::VPTCodes & VPTPredicationCode,bool & CarrySetting,unsigned & ProcessorIMod,StringRef & ITMask)6540 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
6541                                       ARMCC::CondCodes &PredicationCode,
6542                                       ARMVCC::VPTCodes &VPTPredicationCode,
6543                                       bool &CarrySetting,
6544                                       unsigned &ProcessorIMod,
6545                                       StringRef &ITMask) {
6546   PredicationCode = ARMCC::AL;
6547   VPTPredicationCode = ARMVCC::None;
6548   CarrySetting = false;
6549   ProcessorIMod = 0;
6550 
6551   // Ignore some mnemonics we know aren't predicated forms.
6552   //
6553   // FIXME: Would be nice to autogen this.
6554   if ((Mnemonic == "movs" && isThumb()) || Mnemonic == "teq" ||
6555       Mnemonic == "vceq" || Mnemonic == "svc" || Mnemonic == "mls" ||
6556       Mnemonic == "smmls" || Mnemonic == "vcls" || Mnemonic == "vmls" ||
6557       Mnemonic == "vnmls" || Mnemonic == "vacge" || Mnemonic == "vcge" ||
6558       Mnemonic == "vclt" || Mnemonic == "vacgt" || Mnemonic == "vaclt" ||
6559       Mnemonic == "vacle" || Mnemonic == "hlt" || Mnemonic == "vcgt" ||
6560       Mnemonic == "vcle" || Mnemonic == "smlal" || Mnemonic == "umaal" ||
6561       Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" ||
6562       Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || Mnemonic == "fmuls" ||
6563       Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
6564       Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
6565       Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
6566       Mnemonic == "vrintm" || Mnemonic == "hvc" ||
6567       Mnemonic.starts_with("vsel") || Mnemonic == "vins" ||
6568       Mnemonic == "vmovx" || Mnemonic == "bxns" || Mnemonic == "blxns" ||
6569       Mnemonic == "vdot" || Mnemonic == "vmmla" || Mnemonic == "vudot" ||
6570       Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6571       Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "wls" ||
6572       Mnemonic == "le" || Mnemonic == "dls" || Mnemonic == "csel" ||
6573       Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
6574       Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
6575       Mnemonic == "cset" || Mnemonic == "csetm" || Mnemonic == "aut" ||
6576       Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "bti")
6577     return Mnemonic;
6578 
6579   // First, split out any predication code. Ignore mnemonics we know aren't
6580   // predicated but do have a carry-set and so weren't caught above.
6581   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
6582       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
6583       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
6584       Mnemonic != "sbcs" && Mnemonic != "rscs" &&
6585       !(hasMVE() &&
6586         (Mnemonic == "vmine" || Mnemonic == "vshle" || Mnemonic == "vshlt" ||
6587          Mnemonic == "vshllt" || Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
6588          Mnemonic == "vmvne" || Mnemonic == "vorne" || Mnemonic == "vnege" ||
6589          Mnemonic == "vnegt" || Mnemonic == "vmule" || Mnemonic == "vmult" ||
6590          Mnemonic == "vrintne" || Mnemonic == "vcmult" ||
6591          Mnemonic == "vcmule" || Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
6592          Mnemonic.starts_with("vq")))) {
6593     unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
6594     if (CC != ~0U) {
6595       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
6596       PredicationCode = static_cast<ARMCC::CondCodes>(CC);
6597     }
6598   }
6599 
6600   // Next, determine if we have a carry setting bit. We explicitly ignore all
6601   // the instructions we know end in 's'.
6602   if (Mnemonic.ends_with("s") &&
6603       !(Mnemonic == "cps" || Mnemonic == "mls" || Mnemonic == "mrs" ||
6604         Mnemonic == "smmls" || Mnemonic == "vabs" || Mnemonic == "vcls" ||
6605         Mnemonic == "vmls" || Mnemonic == "vmrs" || Mnemonic == "vnmls" ||
6606         Mnemonic == "vqabs" || Mnemonic == "vrecps" || Mnemonic == "vrsqrts" ||
6607         Mnemonic == "srs" || Mnemonic == "flds" || Mnemonic == "fmrs" ||
6608         Mnemonic == "fsqrts" || Mnemonic == "fsubs" || Mnemonic == "fsts" ||
6609         Mnemonic == "fcpys" || Mnemonic == "fdivs" || Mnemonic == "fmuls" ||
6610         Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || Mnemonic == "vfms" ||
6611         Mnemonic == "vfnms" || Mnemonic == "fconsts" || Mnemonic == "bxns" ||
6612         Mnemonic == "blxns" || Mnemonic == "vfmas" || Mnemonic == "vmlas" ||
6613         (Mnemonic == "movs" && isThumb()))) {
6614     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
6615     CarrySetting = true;
6616   }
6617 
6618   // The "cps" instruction can have a interrupt mode operand which is glued into
6619   // the mnemonic. Check if this is the case, split it and parse the imod op
6620   if (Mnemonic.starts_with("cps")) {
6621     // Split out any imod code.
6622     unsigned IMod =
6623       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
6624       .Case("ie", ARM_PROC::IE)
6625       .Case("id", ARM_PROC::ID)
6626       .Default(~0U);
6627     if (IMod != ~0U) {
6628       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
6629       ProcessorIMod = IMod;
6630     }
6631   }
6632 
6633   if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
6634       Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
6635       Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
6636       Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
6637       Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
6638       Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
6639       Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
6640     unsigned VCC =
6641         ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size() - 1));
6642     if (VCC != ~0U) {
6643       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
6644       VPTPredicationCode = static_cast<ARMVCC::VPTCodes>(VCC);
6645     }
6646     return Mnemonic;
6647   }
6648 
6649   // The "it" instruction has the condition mask on the end of the mnemonic.
6650   if (Mnemonic.starts_with("it")) {
6651     ITMask = Mnemonic.slice(2, Mnemonic.size());
6652     Mnemonic = Mnemonic.slice(0, 2);
6653   }
6654 
6655   if (Mnemonic.starts_with("vpst")) {
6656     ITMask = Mnemonic.slice(4, Mnemonic.size());
6657     Mnemonic = Mnemonic.slice(0, 4);
6658   } else if (Mnemonic.starts_with("vpt")) {
6659     ITMask = Mnemonic.slice(3, Mnemonic.size());
6660     Mnemonic = Mnemonic.slice(0, 3);
6661   }
6662 
6663   return Mnemonic;
6664 }
6665 
6666 /// Given a canonical mnemonic, determine if the instruction ever allows
6667 /// inclusion of carry set or predication code operands.
6668 //
6669 // FIXME: It would be nice to autogen this.
getMnemonicAcceptInfo(StringRef Mnemonic,StringRef ExtraToken,StringRef FullInst,bool & CanAcceptCarrySet,bool & CanAcceptPredicationCode,bool & CanAcceptVPTPredicationCode)6670 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
6671                                          StringRef ExtraToken,
6672                                          StringRef FullInst,
6673                                          bool &CanAcceptCarrySet,
6674                                          bool &CanAcceptPredicationCode,
6675                                          bool &CanAcceptVPTPredicationCode) {
6676   CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6677 
6678   CanAcceptCarrySet =
6679       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6680       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
6681       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
6682       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
6683       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
6684       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
6685       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
6686       (!isThumb() &&
6687        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
6688         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
6689 
6690   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
6691       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
6692       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
6693       Mnemonic.starts_with("crc32") || Mnemonic.starts_with("cps") ||
6694       Mnemonic.starts_with("vsel") || Mnemonic == "vmaxnm" ||
6695       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
6696       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
6697       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
6698       Mnemonic.starts_with("aes") || Mnemonic == "hvc" ||
6699       Mnemonic == "setpan" || Mnemonic.starts_with("sha1") ||
6700       Mnemonic.starts_with("sha256") ||
6701       (FullInst.starts_with("vmull") && FullInst.ends_with(".p64")) ||
6702       Mnemonic == "vmovx" || Mnemonic == "vins" || Mnemonic == "vudot" ||
6703       Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6704       Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "vfmat" ||
6705       Mnemonic == "vfmab" || Mnemonic == "vdot" || Mnemonic == "vmmla" ||
6706       Mnemonic == "sb" || Mnemonic == "ssbb" || Mnemonic == "pssbb" ||
6707       Mnemonic == "vsmmla" || Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
6708       Mnemonic == "vusdot" || Mnemonic == "vsudot" || Mnemonic == "bfcsel" ||
6709       Mnemonic == "wls" || Mnemonic == "dls" || Mnemonic == "le" ||
6710       Mnemonic == "csel" || Mnemonic == "csinc" || Mnemonic == "csinv" ||
6711       Mnemonic == "csneg" || Mnemonic == "cinc" || Mnemonic == "cinv" ||
6712       Mnemonic == "cneg" || Mnemonic == "cset" || Mnemonic == "csetm" ||
6713       (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6714        !MS.isITPredicableCDEInstr(Mnemonic)) ||
6715       Mnemonic.starts_with("vpt") || Mnemonic.starts_with("vpst") ||
6716       Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
6717       Mnemonic == "bti" ||
6718       (hasMVE() &&
6719        (Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vld2") ||
6720         Mnemonic.starts_with("vst4") || Mnemonic.starts_with("vld4") ||
6721         Mnemonic.starts_with("wlstp") || Mnemonic.starts_with("dlstp") ||
6722         Mnemonic.starts_with("letp")))) {
6723     // These mnemonics are never predicable
6724     CanAcceptPredicationCode = false;
6725   } else if (!isThumb()) {
6726     // Some instructions are only predicable in Thumb mode
6727     CanAcceptPredicationCode =
6728         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
6729         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
6730         Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
6731         Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
6732         Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
6733         Mnemonic != "stc2" && Mnemonic != "stc2l" && Mnemonic != "tsb" &&
6734         !Mnemonic.starts_with("rfe") && !Mnemonic.starts_with("srs");
6735   } else if (isThumbOne()) {
6736     if (hasV6MOps())
6737       CanAcceptPredicationCode = Mnemonic != "movs";
6738     else
6739       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
6740   } else
6741     CanAcceptPredicationCode = true;
6742 }
6743 
operandsContainWide(OperandVector & Operands,unsigned MnemonicOpsEndInd)6744 bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd) {
6745   for (unsigned I = 0; I < MnemonicOpsEndInd; ++I) {
6746     auto &Op = static_cast<ARMOperand &>(*Operands[I]);
6747     if (Op.isToken() && Op.getToken() == ".w")
6748       return true;
6749   }
6750   return false;
6751 }
6752 
6753 // Some Thumb instructions have two operand forms that are not
6754 // available as three operand, convert to two operand form if possible.
6755 //
6756 // FIXME: We would really like to be able to tablegen'erate this.
tryConvertingToTwoOperandForm(StringRef Mnemonic,ARMCC::CondCodes PredicationCode,bool CarrySetting,OperandVector & Operands,unsigned MnemonicOpsEndInd)6757 void ARMAsmParser::tryConvertingToTwoOperandForm(
6758     StringRef Mnemonic, ARMCC::CondCodes PredicationCode, bool CarrySetting,
6759     OperandVector &Operands, unsigned MnemonicOpsEndInd) {
6760 
6761   if (operandsContainWide(Operands, MnemonicOpsEndInd))
6762     return;
6763   if (Operands.size() != MnemonicOpsEndInd + 3)
6764     return;
6765 
6766   const auto &Op3 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
6767   auto &Op4 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
6768   if (!Op3.isReg() || !Op4.isReg())
6769     return;
6770 
6771   auto Op3Reg = Op3.getReg();
6772   auto Op4Reg = Op4.getReg();
6773 
6774   // For most Thumb2 cases we just generate the 3 operand form and reduce
6775   // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
6776   // won't accept SP or PC so we do the transformation here taking care
6777   // with immediate range in the 'add sp, sp #imm' case.
6778   auto &Op5 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]);
6779   if (isThumbTwo()) {
6780     if (Mnemonic != "add")
6781       return;
6782     bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6783                         (Op5.isReg() && Op5.getReg() == ARM::PC);
6784     if (!TryTransform) {
6785       TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6786                       (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6787                      !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6788                        Op5.isImm() && !Op5.isImm0_508s4());
6789     }
6790     if (!TryTransform)
6791       return;
6792   } else if (!isThumbOne())
6793     return;
6794 
6795   if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
6796         Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6797         Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
6798         Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
6799     return;
6800 
6801   // If first 2 operands of a 3 operand instruction are the same
6802   // then transform to 2 operand version of the same instruction
6803   // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
6804   bool Transform = Op3Reg == Op4Reg;
6805 
6806   // For communtative operations, we might be able to transform if we swap
6807   // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
6808   // as tADDrsp.
6809   const ARMOperand *LastOp = &Op5;
6810   bool Swap = false;
6811   if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6812       ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
6813        Mnemonic == "and" || Mnemonic == "eor" ||
6814        Mnemonic == "adc" || Mnemonic == "orr")) {
6815     Swap = true;
6816     LastOp = &Op4;
6817     Transform = true;
6818   }
6819 
6820   // If both registers are the same then remove one of them from
6821   // the operand list, with certain exceptions.
6822   if (Transform) {
6823     // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
6824     // 2 operand forms don't exist.
6825     if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
6826         LastOp->isReg())
6827       Transform = false;
6828 
6829     // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
6830     // 3-bits because the ARMARM says not to.
6831     if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
6832       Transform = false;
6833   }
6834 
6835   if (Transform) {
6836     if (Swap)
6837       std::swap(Op4, Op5);
6838     Operands.erase(Operands.begin() + MnemonicOpsEndInd);
6839   }
6840 }
6841 
6842 // this function returns true if the operand is one of the following
6843 // relocations: :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7:
isThumbI8Relocation(MCParsedAsmOperand & MCOp)6844 static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp) {
6845   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
6846   if (!Op.isImm())
6847     return false;
6848   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6849   if (CE)
6850     return false;
6851   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6852   if (!E)
6853     return false;
6854   const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6855   if (ARM16Expr && (ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_8_15 ||
6856                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_0_7 ||
6857                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_8_15 ||
6858                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_0_7))
6859     return true;
6860   return false;
6861 }
6862 
shouldOmitVectorPredicateOperand(StringRef Mnemonic,OperandVector & Operands,unsigned MnemonicOpsEndInd)6863 bool ARMAsmParser::shouldOmitVectorPredicateOperand(
6864     StringRef Mnemonic, OperandVector &Operands, unsigned MnemonicOpsEndInd) {
6865   if (!hasMVE() || Operands.size() <= MnemonicOpsEndInd)
6866     return true;
6867 
6868   if (Mnemonic.starts_with("vld2") || Mnemonic.starts_with("vld4") ||
6869       Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vst4"))
6870     return true;
6871 
6872   if (Mnemonic.starts_with("vctp") || Mnemonic.starts_with("vpnot"))
6873     return false;
6874 
6875   if (Mnemonic.starts_with("vmov") &&
6876       !(Mnemonic.starts_with("vmovl") || Mnemonic.starts_with("vmovn") ||
6877         Mnemonic.starts_with("vmovx"))) {
6878     for (auto &Operand : Operands) {
6879       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6880           ((*Operand).isReg() &&
6881            (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6882              (*Operand).getReg()) ||
6883             ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6884               (*Operand).getReg())))) {
6885         return true;
6886       }
6887     }
6888     return false;
6889   } else {
6890     for (auto &Operand : Operands) {
6891       // We check the larger class QPR instead of just the legal class
6892       // MQPR, to more accurately report errors when using Q registers
6893       // outside of the allowed range.
6894       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6895           static_cast<ARMOperand &>(*Operand).isQReg())
6896         return false;
6897     }
6898     return true;
6899   }
6900 }
6901 
6902 // FIXME: This bit should probably be handled via an explicit match class
6903 // in the .td files that matches the suffix instead of having it be
6904 // a literal string token the way it is now.
doesIgnoreDataTypeSuffix(StringRef Mnemonic,StringRef DT)6905 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6906   return Mnemonic.starts_with("vldm") || Mnemonic.starts_with("vstm");
6907 }
6908 
6909 static void applyMnemonicAliases(StringRef &Mnemonic,
6910                                  const FeatureBitset &Features,
6911                                  unsigned VariantID);
6912 
6913 // The GNU assembler has aliases of ldrd, strd, ldrexd, strexd, ldaexd, and
6914 // stlexd with the second register omitted. We don't have a way to do that in
6915 // tablegen, so fix it up here.
6916 //
6917 // We have to be careful to not emit an invalid Rt2 here, because the rest of
6918 // the assembly parser could then generate confusing diagnostics refering to
6919 // it. If we do find anything that prevents us from doing the transformation we
6920 // bail out, and let the assembly parser report an error on the instruction as
6921 // it is written.
fixupGNULDRDAlias(StringRef Mnemonic,OperandVector & Operands,unsigned MnemonicOpsEndInd)6922 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6923                                      OperandVector &Operands,
6924                                      unsigned MnemonicOpsEndInd) {
6925   if (Mnemonic != "ldrd" && Mnemonic != "strd" && Mnemonic != "ldrexd" &&
6926       Mnemonic != "strexd" && Mnemonic != "ldaexd" && Mnemonic != "stlexd")
6927     return;
6928 
6929   unsigned IdX = Mnemonic == "strexd" || Mnemonic == "stlexd"
6930                      ? MnemonicOpsEndInd + 1
6931                      : MnemonicOpsEndInd;
6932 
6933   if (Operands.size() < IdX + 2)
6934     return;
6935 
6936   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[IdX]);
6937   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[IdX + 1]);
6938 
6939   if (!Op2.isReg())
6940     return;
6941   if (!Op3.isGPRMem())
6942     return;
6943 
6944   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6945   if (!GPR.contains(Op2.getReg()))
6946     return;
6947 
6948   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6949   if (!isThumb() && (RtEncoding & 1)) {
6950     // In ARM mode, the registers must be from an aligned pair, this
6951     // restriction does not apply in Thumb mode.
6952     return;
6953   }
6954   if (Op2.getReg() == ARM::PC)
6955     return;
6956   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6957   if (!PairedReg || PairedReg == ARM::PC ||
6958       (PairedReg == ARM::SP && !hasV8Ops()))
6959     return;
6960 
6961   Operands.insert(Operands.begin() + IdX + 1,
6962                   ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(),
6963                                         Op2.getEndLoc(), *this));
6964 }
6965 
6966 // Dual-register instruction have the following syntax:
6967 // <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
6968 // This function tries to remove <Rdest+1> and replace <Rdest> with a pair
6969 // operand. If the conversion fails an error is diagnosed, and the function
6970 // returns true.
CDEConvertDualRegOperand(StringRef Mnemonic,OperandVector & Operands,unsigned MnemonicOpsEndInd)6971 bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
6972                                             OperandVector &Operands,
6973                                             unsigned MnemonicOpsEndInd) {
6974   assert(MS.isCDEDualRegInstr(Mnemonic));
6975 
6976   if (Operands.size() < 3 + MnemonicOpsEndInd)
6977     return false;
6978 
6979   StringRef Op2Diag(
6980       "operand must be an even-numbered register in the range [r0, r10]");
6981 
6982   const MCParsedAsmOperand &Op2 = *Operands[MnemonicOpsEndInd + 1];
6983   if (!Op2.isReg())
6984     return Error(Op2.getStartLoc(), Op2Diag);
6985 
6986   unsigned RNext;
6987   unsigned RPair;
6988   switch (Op2.getReg()) {
6989   default:
6990     return Error(Op2.getStartLoc(), Op2Diag);
6991   case ARM::R0:
6992     RNext = ARM::R1;
6993     RPair = ARM::R0_R1;
6994     break;
6995   case ARM::R2:
6996     RNext = ARM::R3;
6997     RPair = ARM::R2_R3;
6998     break;
6999   case ARM::R4:
7000     RNext = ARM::R5;
7001     RPair = ARM::R4_R5;
7002     break;
7003   case ARM::R6:
7004     RNext = ARM::R7;
7005     RPair = ARM::R6_R7;
7006     break;
7007   case ARM::R8:
7008     RNext = ARM::R9;
7009     RPair = ARM::R8_R9;
7010     break;
7011   case ARM::R10:
7012     RNext = ARM::R11;
7013     RPair = ARM::R10_R11;
7014     break;
7015   }
7016 
7017   const MCParsedAsmOperand &Op3 = *Operands[MnemonicOpsEndInd + 2];
7018   if (!Op3.isReg() || Op3.getReg() != RNext)
7019     return Error(Op3.getStartLoc(), "operand must be a consecutive register");
7020 
7021   Operands.erase(Operands.begin() + MnemonicOpsEndInd + 2);
7022   Operands[MnemonicOpsEndInd + 1] =
7023       ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc(), *this);
7024   return false;
7025 }
7026 
removeCondCode(OperandVector & Operands,unsigned & MnemonicOpsEndInd)7027 void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7028   for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7029     if (static_cast<ARMOperand &>(*Operands[I]).isCondCode()) {
7030       Operands.erase(Operands.begin() + I);
7031       --MnemonicOpsEndInd;
7032       break;
7033     }
7034 }
7035 
removeCCOut(OperandVector & Operands,unsigned & MnemonicOpsEndInd)7036 void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7037   for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7038     if (static_cast<ARMOperand &>(*Operands[I]).isCCOut()) {
7039       Operands.erase(Operands.begin() + I);
7040       --MnemonicOpsEndInd;
7041       break;
7042     }
7043 }
7044 
removeVPTCondCode(OperandVector & Operands,unsigned & MnemonicOpsEndInd)7045 void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7046   for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7047     if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) {
7048       Operands.erase(Operands.begin() + I);
7049       --MnemonicOpsEndInd;
7050       break;
7051     }
7052 }
7053 
7054 /// Parse an arm instruction mnemonic followed by its operands.
ParseInstruction(ParseInstructionInfo & Info,StringRef Name,SMLoc NameLoc,OperandVector & Operands)7055 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
7056                                     SMLoc NameLoc, OperandVector &Operands) {
7057   MCAsmParser &Parser = getParser();
7058 
7059   // Apply mnemonic aliases before doing anything else, as the destination
7060   // mnemonic may include suffices and we want to handle them normally.
7061   // The generic tblgen'erated code does this later, at the start of
7062   // MatchInstructionImpl(), but that's too late for aliases that include
7063   // any sort of suffix.
7064   const FeatureBitset &AvailableFeatures = getAvailableFeatures();
7065   unsigned AssemblerDialect = getParser().getAssemblerDialect();
7066   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
7067 
7068   // First check for the ARM-specific .req directive.
7069   if (Parser.getTok().is(AsmToken::Identifier) &&
7070       Parser.getTok().getIdentifier().lower() == ".req") {
7071     parseDirectiveReq(Name, NameLoc);
7072     // We always return 'error' for this, as we're done with this
7073     // statement and don't need to match the 'instruction."
7074     return true;
7075   }
7076 
7077   // Create the leading tokens for the mnemonic, split by '.' characters.
7078   size_t Start = 0, Next = Name.find('.');
7079   StringRef Mnemonic = Name.slice(Start, Next);
7080   StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
7081 
7082   // Split out the predication code and carry setting flag from the mnemonic.
7083   ARMCC::CondCodes PredicationCode;
7084   ARMVCC::VPTCodes VPTPredicationCode;
7085   unsigned ProcessorIMod;
7086   bool CarrySetting;
7087   StringRef ITMask;
7088   Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
7089                            CarrySetting, ProcessorIMod, ITMask);
7090 
7091   // In Thumb1, only the branch (B) instruction can be predicated.
7092   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
7093     return Error(NameLoc, "conditional execution not supported in Thumb1");
7094   }
7095 
7096   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc, *this));
7097 
7098   // Handle the mask for IT and VPT instructions. In ARMOperand and
7099   // MCOperand, this is stored in a format independent of the
7100   // condition code: the lowest set bit indicates the end of the
7101   // encoding, and above that, a 1 bit indicates 'else', and an 0
7102   // indicates 'then'. E.g.
7103   //    IT    -> 1000
7104   //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
7105   //    ITxy  -> xy10    (e.g. ITET -> 1010)
7106   //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
7107   // Note: See the ARM::PredBlockMask enum in
7108   //   /lib/Target/ARM/Utils/ARMBaseInfo.h
7109   if (Mnemonic == "it" || Mnemonic.starts_with("vpt") ||
7110       Mnemonic.starts_with("vpst")) {
7111     SMLoc Loc = Mnemonic == "it"  ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
7112                 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7113                                     SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7114     if (ITMask.size() > 3) {
7115       if (Mnemonic == "it")
7116         return Error(Loc, "too many conditions on IT instruction");
7117       return Error(Loc, "too many conditions on VPT instruction");
7118     }
7119     unsigned Mask = 8;
7120     for (char Pos : llvm::reverse(ITMask)) {
7121       if (Pos != 't' && Pos != 'e') {
7122         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
7123       }
7124       Mask >>= 1;
7125       if (Pos == 'e')
7126         Mask |= 8;
7127     }
7128     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc, *this));
7129   }
7130 
7131   // FIXME: This is all a pretty gross hack. We should automatically handle
7132   // optional operands like this via tblgen.
7133 
7134   // Next, add the CCOut and ConditionCode operands, if needed.
7135   //
7136   // For mnemonics which can ever incorporate a carry setting bit or predication
7137   // code, our matching model involves us always generating CCOut and
7138   // ConditionCode operands to match the mnemonic "as written" and then we let
7139   // the matcher deal with finding the right instruction or generating an
7140   // appropriate error.
7141   bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7142   getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7143                         CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7144 
7145   // If we had a carry-set on an instruction that can't do that, issue an
7146   // error.
7147   if (!CanAcceptCarrySet && CarrySetting) {
7148     return Error(NameLoc, "instruction '" + Mnemonic +
7149                  "' can not set flags, but 's' suffix specified");
7150   }
7151   // If we had a predication code on an instruction that can't do that, issue an
7152   // error.
7153   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7154     return Error(NameLoc, "instruction '" + Mnemonic +
7155                  "' is not predicable, but condition code specified");
7156   }
7157 
7158   // If we had a VPT predication code on an instruction that can't do that, issue an
7159   // error.
7160   if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7161     return Error(NameLoc, "instruction '" + Mnemonic +
7162                  "' is not VPT predicable, but VPT code T/E is specified");
7163   }
7164 
7165   // Add the carry setting operand, if necessary.
7166   if (CanAcceptCarrySet && CarrySetting) {
7167     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7168     Operands.push_back(
7169         ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, Loc, *this));
7170   }
7171 
7172   // Add the predication code operand, if necessary.
7173   if (CanAcceptPredicationCode && PredicationCode != llvm::ARMCC::AL) {
7174     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7175                                       CarrySetting);
7176     Operands.push_back(ARMOperand::CreateCondCode(
7177         ARMCC::CondCodes(PredicationCode), Loc, *this));
7178   }
7179 
7180   // Add the VPT predication code operand, if necessary.
7181   // Dont add in certain cases of VCVT as this needs to be disambiguated
7182   // after operand parsing.
7183   if (CanAcceptVPTPredicationCode && VPTPredicationCode != llvm::ARMVCC::None &&
7184       !(Mnemonic.starts_with("vcvt") && Mnemonic != "vcvta" &&
7185         Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7186     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7187                                       CarrySetting);
7188     Operands.push_back(ARMOperand::CreateVPTPred(
7189         ARMVCC::VPTCodes(VPTPredicationCode), Loc, *this));
7190   }
7191 
7192   // Add the processor imod operand, if necessary.
7193   if (ProcessorIMod) {
7194     Operands.push_back(ARMOperand::CreateImm(
7195         MCConstantExpr::create(ProcessorIMod, getContext()), NameLoc, NameLoc,
7196         *this));
7197   } else if (Mnemonic == "cps" && isMClass()) {
7198     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7199   }
7200 
7201   // Add the remaining tokens in the mnemonic.
7202   while (Next != StringRef::npos) {
7203     Start = Next;
7204     Next = Name.find('.', Start + 1);
7205     ExtraToken = Name.slice(Start, Next);
7206 
7207     // Some NEON instructions have an optional datatype suffix that is
7208     // completely ignored. Check for that.
7209     if (isDataTypeToken(ExtraToken) &&
7210         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7211       continue;
7212 
7213     // For for ARM mode generate an error if the .n qualifier is used.
7214     if (ExtraToken == ".n" && !isThumb()) {
7215       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7216       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7217                    "arm mode");
7218     }
7219 
7220     // The .n qualifier is always discarded as that is what the tables
7221     // and matcher expect.  In ARM mode the .w qualifier has no effect,
7222     // so discard it to avoid errors that can be caused by the matcher.
7223     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7224       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7225       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc, *this));
7226     }
7227   }
7228 
7229   // This marks the end of the LHS Mnemonic operators.
7230   // This is used for indexing into the non-menmonic operators as some of the
7231   // mnemonic operators are optional and therfore indexes can differ.
7232   unsigned MnemonicOpsEndInd = Operands.size();
7233 
7234   // Read the remaining operands.
7235   if (getLexer().isNot(AsmToken::EndOfStatement)) {
7236     // Read the first operand.
7237     if (parseOperand(Operands, Mnemonic)) {
7238       return true;
7239     }
7240 
7241     while (parseOptionalToken(AsmToken::Comma)) {
7242       // Parse and remember the operand.
7243       if (parseOperand(Operands, Mnemonic)) {
7244         return true;
7245       }
7246     }
7247   }
7248 
7249   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7250     return true;
7251 
7252   tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
7253                                 Operands, MnemonicOpsEndInd);
7254 
7255   if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7256     // Dual-register instructions use even-odd register pairs as their
7257     // destination operand, in assembly such pair is spelled as two
7258     // consecutive registers, without any special syntax. ConvertDualRegOperand
7259     // tries to convert such operand into register pair, e.g. r2, r3 -> r2_r3.
7260     // It returns true, if an error message has been emitted. If the function
7261     // returns false, the function either succeeded or an error (e.g. missing
7262     // operand) will be diagnosed elsewhere.
7263     if (MS.isCDEDualRegInstr(Mnemonic)) {
7264       bool GotError =
7265           CDEConvertDualRegOperand(Mnemonic, Operands, MnemonicOpsEndInd);
7266       if (GotError)
7267         return GotError;
7268     }
7269   }
7270 
7271   if (hasMVE()) {
7272     if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7273                                           MnemonicOpsEndInd) &&
7274         Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
7275       // Very nasty hack to deal with the vector predicated variant of vmovlt
7276       // the scalar predicated vmov with condition 'lt'.  We can not tell them
7277       // apart until we have parsed their operands.
7278       Operands.erase(Operands.begin() + 1);
7279       Operands.erase(Operands.begin());
7280       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7281       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7282                                          Mnemonic.size() - 1 + CarrySetting);
7283       Operands.insert(Operands.begin(),
7284                       ARMOperand::CreateVPTPred(ARMVCC::None, PLoc, *this));
7285       Operands.insert(Operands.begin(), ARMOperand::CreateToken(
7286                                             StringRef("vmovlt"), MLoc, *this));
7287     } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7288                !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7289                                                  MnemonicOpsEndInd)) {
7290       // Another nasty hack to deal with the ambiguity between vcvt with scalar
7291       // predication 'ne' and vcvtn with vector predication 'e'.  As above we
7292       // can only distinguish between the two after we have parsed their
7293       // operands.
7294       Operands.erase(Operands.begin() + 1);
7295       Operands.erase(Operands.begin());
7296       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7297       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7298                                          Mnemonic.size() - 1 + CarrySetting);
7299       Operands.insert(Operands.begin(),
7300                       ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc, *this));
7301       Operands.insert(Operands.begin(),
7302                       ARMOperand::CreateToken(StringRef("vcvtn"), MLoc, *this));
7303     } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7304                !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7305                                                  MnemonicOpsEndInd)) {
7306       // Another hack, this time to distinguish between scalar predicated vmul
7307       // with 'lt' predication code and the vector instruction vmullt with
7308       // vector predication code "none"
7309       removeCondCode(Operands, MnemonicOpsEndInd);
7310       Operands.erase(Operands.begin());
7311       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7312       Operands.insert(Operands.begin(), ARMOperand::CreateToken(
7313                                             StringRef("vmullt"), MLoc, *this));
7314     } else if (Mnemonic.starts_with("vcvt") && !Mnemonic.starts_with("vcvta") &&
7315                !Mnemonic.starts_with("vcvtn") &&
7316                !Mnemonic.starts_with("vcvtp") &&
7317                !Mnemonic.starts_with("vcvtm")) {
7318       if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7319                                             MnemonicOpsEndInd)) {
7320         // We could not split the vector predicate off vcvt because it might
7321         // have been the scalar vcvtt instruction.  Now we know its a vector
7322         // instruction, we still need to check whether its the vector
7323         // predicated vcvt with 'Then' predication or the vector vcvtt.  We can
7324         // distinguish the two based on the suffixes, if it is any of
7325         // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
7326         if (Mnemonic.starts_with("vcvtt") && MnemonicOpsEndInd > 2) {
7327           auto Sz1 =
7328               static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 2]);
7329           auto Sz2 =
7330               static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 1]);
7331           if (!(Sz1.isToken() && Sz1.getToken().starts_with(".f") &&
7332                 Sz2.isToken() && Sz2.getToken().starts_with(".f"))) {
7333             Operands.erase(Operands.begin());
7334             SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7335             VPTPredicationCode = ARMVCC::Then;
7336 
7337             Mnemonic = Mnemonic.substr(0, 4);
7338             Operands.insert(Operands.begin(),
7339                             ARMOperand::CreateToken(Mnemonic, MLoc, *this));
7340           }
7341         }
7342         SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7343                                           Mnemonic.size() + CarrySetting);
7344         // Add VPTPred
7345         Operands.insert(Operands.begin() + 1,
7346                         ARMOperand::CreateVPTPred(
7347                             ARMVCC::VPTCodes(VPTPredicationCode), PLoc, *this));
7348         ++MnemonicOpsEndInd;
7349       }
7350     } else if (CanAcceptVPTPredicationCode) {
7351       // For all other instructions, make sure only one of the two
7352       // predication operands is left behind, depending on whether we should
7353       // use the vector predication.
7354       if (shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7355                                            MnemonicOpsEndInd)) {
7356         removeVPTCondCode(Operands, MnemonicOpsEndInd);
7357       }
7358     }
7359   }
7360 
7361   if (VPTPredicationCode != ARMVCC::None) {
7362     bool usedVPTPredicationCode = false;
7363     for (unsigned I = 1; I < Operands.size(); ++I)
7364       if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7365         usedVPTPredicationCode = true;
7366     if (!usedVPTPredicationCode) {
7367       // If we have a VPT predication code and we haven't just turned it
7368       // into an operand, then it was a mistake for splitMnemonic to
7369       // separate it from the rest of the mnemonic in the first place,
7370       // and this may lead to wrong disassembly (e.g. scalar floating
7371       // point VCMPE is actually a different instruction from VCMP, so
7372       // we mustn't treat them the same). In that situation, glue it
7373       // back on.
7374       Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7375       Operands.erase(Operands.begin());
7376       Operands.insert(Operands.begin(),
7377                       ARMOperand::CreateToken(Mnemonic, NameLoc, *this));
7378     }
7379   }
7380 
7381   // ARM mode 'blx' need special handling, as the register operand version
7382   // is predicable, but the label operand version is not. So, we can't rely
7383   // on the Mnemonic based checking to correctly figure out when to put
7384   // a k_CondCode operand in the list. If we're trying to match the label
7385   // version, remove the k_CondCode operand here.
7386   if (!isThumb() && Mnemonic == "blx" &&
7387       Operands.size() == MnemonicOpsEndInd + 1 &&
7388       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isImm())
7389     removeCondCode(Operands, MnemonicOpsEndInd);
7390 
7391   // GNU Assembler extension (compatibility).
7392   fixupGNULDRDAlias(Mnemonic, Operands, MnemonicOpsEndInd);
7393 
7394   // Adjust operands of ldrexd/strexd to MCK_GPRPair.
7395   // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
7396   // a single GPRPair reg operand is used in the .td file to replace the two
7397   // GPRs. However, when parsing from asm, the two GRPs cannot be
7398   // automatically
7399   // expressed as a GPRPair, so we have to manually merge them.
7400   // FIXME: We would really like to be able to tablegen'erate this.
7401   bool IsLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
7402   if (!isThumb() && Operands.size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
7403       (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
7404        Mnemonic == "stlexd")) {
7405     unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
7406     ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
7407     ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
7408 
7409     const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
7410     // Adjust only if Op1 is a GPR.
7411     if (Op1.isReg() && MRC.contains(Op1.getReg())) {
7412       unsigned Reg1 = Op1.getReg();
7413       unsigned Rt = MRI->getEncodingValue(Reg1);
7414       unsigned Reg2 = Op2.getReg();
7415       unsigned Rt2 = MRI->getEncodingValue(Reg2);
7416       // Rt2 must be Rt + 1.
7417       if (Rt + 1 != Rt2)
7418         return Error(Op2.getStartLoc(),
7419                      IsLoad ? "destination operands must be sequential"
7420                             : "source operands must be sequential");
7421 
7422       // Rt must be even
7423       if (Rt & 1)
7424         return Error(
7425             Op1.getStartLoc(),
7426             IsLoad ? "destination operands must start start at an even register"
7427                    : "source operands must start start at an even register");
7428 
7429       unsigned NewReg = MRI->getMatchingSuperReg(
7430           Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
7431       Operands[Idx] = ARMOperand::CreateReg(NewReg, Op1.getStartLoc(),
7432                                             Op2.getEndLoc(), *this);
7433       Operands.erase(Operands.begin() + Idx + 1);
7434     }
7435   }
7436 
7437   // FIXME: As said above, this is all a pretty gross hack.  This instruction
7438   // does not fit with other "subs" and tblgen.
7439   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7440   // so the Mnemonic is the original name "subs" and delete the predicate
7441   // operand so it will match the table entry.
7442   if (isThumbTwo() && Mnemonic == "sub" &&
7443       Operands.size() == MnemonicOpsEndInd + 3 &&
7444       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isReg() &&
7445       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).getReg() ==
7446           ARM::PC &&
7447       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).isReg() &&
7448       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).getReg() ==
7449           ARM::LR &&
7450       static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]).isImm()) {
7451     Operands.front() = ARMOperand::CreateToken(Name, NameLoc, *this);
7452     removeCCOut(Operands, MnemonicOpsEndInd);
7453   }
7454   return false;
7455 }
7456 
7457 // Validate context-sensitive operand constraints.
7458 
7459 // return 'true' if register list contains non-low GPR registers,
7460 // 'false' otherwise. If Reg is in the register list or is HiReg, set
7461 // 'containsReg' to true.
checkLowRegisterList(const MCInst & Inst,unsigned OpNo,unsigned Reg,unsigned HiReg,bool & containsReg)7462 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7463                                  unsigned Reg, unsigned HiReg,
7464                                  bool &containsReg) {
7465   containsReg = false;
7466   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7467     unsigned OpReg = Inst.getOperand(i).getReg();
7468     if (OpReg == Reg)
7469       containsReg = true;
7470     // Anything other than a low register isn't legal here.
7471     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7472       return true;
7473   }
7474   return false;
7475 }
7476 
7477 // Check if the specified regisgter is in the register list of the inst,
7478 // starting at the indicated operand number.
listContainsReg(const MCInst & Inst,unsigned OpNo,unsigned Reg)7479 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
7480   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7481     unsigned OpReg = Inst.getOperand(i).getReg();
7482     if (OpReg == Reg)
7483       return true;
7484   }
7485   return false;
7486 }
7487 
7488 // Return true if instruction has the interesting property of being
7489 // allowed in IT blocks, but not being predicable.
instIsBreakpoint(const MCInst & Inst)7490 static bool instIsBreakpoint(const MCInst &Inst) {
7491     return Inst.getOpcode() == ARM::tBKPT ||
7492            Inst.getOpcode() == ARM::BKPT ||
7493            Inst.getOpcode() == ARM::tHLT ||
7494            Inst.getOpcode() == ARM::HLT;
7495 }
7496 
getRegListInd(const OperandVector & Operands,unsigned MnemonicOpsEndInd)7497 unsigned getRegListInd(const OperandVector &Operands,
7498                        unsigned MnemonicOpsEndInd) {
7499   for (unsigned I = MnemonicOpsEndInd; I < Operands.size(); ++I) {
7500     const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[I]);
7501     if (Op.isRegList()) {
7502       return I;
7503     }
7504   }
7505   return 0;
7506 }
7507 
validatetLDMRegList(const MCInst & Inst,const OperandVector & Operands,unsigned MnemonicOpsEndInd,unsigned ListIndex,bool IsARPop)7508 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7509                                        const OperandVector &Operands,
7510                                        unsigned MnemonicOpsEndInd,
7511                                        unsigned ListIndex, bool IsARPop) {
7512   bool ListContainsSP = listContainsReg(Inst, ListIndex, ARM::SP);
7513   bool ListContainsLR = listContainsReg(Inst, ListIndex, ARM::LR);
7514   bool ListContainsPC = listContainsReg(Inst, ListIndex, ARM::PC);
7515 
7516   if (!IsARPop && ListContainsSP)
7517     return Error(
7518         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7519         "SP may not be in the register list");
7520   if (ListContainsPC && ListContainsLR)
7521     return Error(
7522         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7523         "PC and LR may not be in the register list simultaneously");
7524   return false;
7525 }
7526 
validatetSTMRegList(const MCInst & Inst,const OperandVector & Operands,unsigned MnemonicOpsEndInd,unsigned ListIndex)7527 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7528                                        const OperandVector &Operands,
7529                                        unsigned MnemonicOpsEndInd,
7530                                        unsigned ListIndex) {
7531   bool ListContainsSP = listContainsReg(Inst, ListIndex, ARM::SP);
7532   bool ListContainsPC = listContainsReg(Inst, ListIndex, ARM::PC);
7533 
7534   if (ListContainsSP && ListContainsPC)
7535     return Error(
7536         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7537         "SP and PC may not be in the register list");
7538   if (ListContainsSP)
7539     return Error(
7540         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7541         "SP may not be in the register list");
7542   if (ListContainsPC)
7543     return Error(
7544         Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7545         "PC may not be in the register list");
7546   return false;
7547 }
7548 
validateLDRDSTRD(MCInst & Inst,const OperandVector & Operands,bool Load,bool ARMMode,bool Writeback,unsigned MnemonicOpsEndInd)7549 bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
7550                                     bool Load, bool ARMMode, bool Writeback,
7551                                     unsigned MnemonicOpsEndInd) {
7552   unsigned RtIndex = Load || !Writeback ? 0 : 1;
7553   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7554   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7555 
7556   if (ARMMode) {
7557     // Rt can't be R14.
7558     if (Rt == 14)
7559       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7560                    "Rt can't be R14");
7561 
7562     // Rt must be even-numbered.
7563     if ((Rt & 1) == 1)
7564       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7565                    "Rt must be even-numbered");
7566 
7567     // Rt2 must be Rt + 1.
7568     if (Rt2 != Rt + 1) {
7569       if (Load)
7570         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7571                      "destination operands must be sequential");
7572       else
7573         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7574                      "source operands must be sequential");
7575     }
7576 
7577     // FIXME: Diagnose m == 15
7578     // FIXME: Diagnose ldrd with m == t || m == t2.
7579   }
7580 
7581   if (!ARMMode && Load) {
7582     if (Rt2 == Rt)
7583       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7584                    "destination operands can't be identical");
7585   }
7586 
7587   if (Writeback) {
7588     unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7589 
7590     if (Rn == Rt || Rn == Rt2) {
7591       if (Load)
7592         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7593                      "base register needs to be different from destination "
7594                      "registers");
7595       else
7596         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7597                      "source register and base register can't be identical");
7598     }
7599 
7600     // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7601     // (Except the immediate form of ldrd?)
7602   }
7603 
7604   return false;
7605 }
7606 
findFirstVectorPredOperandIdx(const MCInstrDesc & MCID)7607 static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
7608   for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7609     if (ARM::isVpred(MCID.operands()[i].OperandType))
7610       return i;
7611   }
7612   return -1;
7613 }
7614 
isVectorPredicable(const MCInstrDesc & MCID)7615 static bool isVectorPredicable(const MCInstrDesc &MCID) {
7616   return findFirstVectorPredOperandIdx(MCID) != -1;
7617 }
7618 
isARMMCExpr(MCParsedAsmOperand & MCOp)7619 static bool isARMMCExpr(MCParsedAsmOperand &MCOp) {
7620   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
7621   if (!Op.isImm())
7622     return false;
7623   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7624   if (CE)
7625     return false;
7626   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7627   if (!E)
7628     return false;
7629   return true;
7630 }
7631 
7632 // FIXME: We would really like to be able to tablegen'erate this.
validateInstruction(MCInst & Inst,const OperandVector & Operands,unsigned MnemonicOpsEndInd)7633 bool ARMAsmParser::validateInstruction(MCInst &Inst,
7634                                        const OperandVector &Operands,
7635                                        unsigned MnemonicOpsEndInd) {
7636   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7637   SMLoc Loc = Operands[0]->getStartLoc();
7638 
7639   // Check the IT block state first.
7640   // NOTE: BKPT and HLT instructions have the interesting property of being
7641   // allowed in IT blocks, but not being predicable. They just always execute.
7642   if (inITBlock() && !instIsBreakpoint(Inst)) {
7643     // The instruction must be predicable.
7644     if (!MCID.isPredicable())
7645       return Error(Loc, "instructions in IT block must be predicable");
7646     ARMCC::CondCodes Cond = ARMCC::CondCodes(
7647         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
7648     if (Cond != currentITCond()) {
7649       // Find the condition code Operand to get its SMLoc information.
7650       SMLoc CondLoc = Operands[0]->getEndLoc();
7651       for (unsigned I = 1; I < Operands.size(); ++I)
7652         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7653           CondLoc = Operands[I]->getStartLoc();
7654       return Error(CondLoc, "incorrect condition in IT block; got '" +
7655                                 StringRef(ARMCondCodeToString(Cond)) +
7656                                 "', but expected '" +
7657                                 ARMCondCodeToString(currentITCond()) + "'");
7658     }
7659   // Check for non-'al' condition codes outside of the IT block.
7660   } else if (isThumbTwo() && MCID.isPredicable() &&
7661              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7662              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7663              Inst.getOpcode() != ARM::t2Bcc &&
7664              Inst.getOpcode() != ARM::t2BFic) {
7665     return Error(Loc, "predicated instructions must be in IT block");
7666   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7667              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7668                  ARMCC::AL) {
7669     return Warning(Loc, "predicated instructions should be in IT block");
7670   } else if (!MCID.isPredicable()) {
7671     // Check the instruction doesn't have a predicate operand anyway
7672     // that it's not allowed to use. Sometimes this happens in order
7673     // to keep instructions the same shape even though one cannot
7674     // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7675     for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7676       if (MCID.operands()[i].isPredicate()) {
7677         if (Inst.getOperand(i).getImm() != ARMCC::AL)
7678           return Error(Loc, "instruction is not predicable");
7679         break;
7680       }
7681     }
7682   }
7683 
7684   // PC-setting instructions in an IT block, but not the last instruction of
7685   // the block, are UNPREDICTABLE.
7686   if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7687     return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
7688   }
7689 
7690   if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7691     unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7692     if (!isVectorPredicable(MCID))
7693       return Error(Loc, "instruction in VPT block must be predicable");
7694     unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7695     unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7696     if (Pred != VPTPred) {
7697       SMLoc PredLoc;
7698       for (unsigned I = 1; I < Operands.size(); ++I)
7699         if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7700           PredLoc = Operands[I]->getStartLoc();
7701       return Error(PredLoc, "incorrect predication in VPT block; got '" +
7702                    StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
7703                    "', but expected '" +
7704                    ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7705     }
7706   }
7707   else if (isVectorPredicable(MCID) &&
7708            Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7709            ARMVCC::None)
7710     return Error(Loc, "VPT predicated instructions must be in VPT block");
7711 
7712   const unsigned Opcode = Inst.getOpcode();
7713   switch (Opcode) {
7714   case ARM::VLLDM:
7715   case ARM::VLLDM_T2:
7716   case ARM::VLSTM:
7717   case ARM::VLSTM_T2: {
7718     // Since in some cases both T1 and T2 are valid, tablegen can not always
7719     // pick the correct instruction.
7720     if (Operands.size() ==
7721         MnemonicOpsEndInd + 2) { // a register list has been provided
7722       ARMOperand &Op = static_cast<ARMOperand &>(
7723           *Operands[MnemonicOpsEndInd + 1]); // the register list, a dpr_reglist
7724       assert(Op.isDPRRegList());
7725       auto &RegList = Op.getRegList();
7726       // T2 requires v8.1-M.Main (cannot be handled by tablegen)
7727       if (RegList.size() == 32 && !hasV8_1MMainline()) {
7728         return Error(Op.getEndLoc(), "T2 version requires v8.1-M.Main");
7729       }
7730       // When target has 32 D registers, T1 is undefined.
7731       if (hasD32() && RegList.size() != 32) {
7732         return Error(Op.getEndLoc(), "operand must be exactly {d0-d31}");
7733       }
7734       // When target has 16 D registers, both T1 and T2 are valid.
7735       if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7736         return Error(Op.getEndLoc(),
7737                      "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7738       }
7739     }
7740     return false;
7741   }
7742   case ARM::t2IT: {
7743     // Encoding is unpredictable if it ever results in a notional 'NV'
7744     // predicate. Since we don't parse 'NV' directly this means an 'AL'
7745     // predicate with an "else" mask bit.
7746     unsigned Cond = Inst.getOperand(0).getImm();
7747     unsigned Mask = Inst.getOperand(1).getImm();
7748 
7749     // Conditions only allowing a 't' are those with no set bit except
7750     // the lowest-order one that indicates the end of the sequence. In
7751     // other words, powers of 2.
7752     if (Cond == ARMCC::AL && llvm::popcount(Mask) != 1)
7753       return Error(Loc, "unpredictable IT predicate sequence");
7754     break;
7755   }
7756   case ARM::LDRD:
7757     if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
7758                          /*Writeback*/ false, MnemonicOpsEndInd))
7759       return true;
7760     break;
7761   case ARM::LDRD_PRE:
7762   case ARM::LDRD_POST:
7763     if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
7764                          /*Writeback*/ true, MnemonicOpsEndInd))
7765       return true;
7766     break;
7767   case ARM::t2LDRDi8:
7768     if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
7769                          /*Writeback*/ false, MnemonicOpsEndInd))
7770       return true;
7771     break;
7772   case ARM::t2LDRD_PRE:
7773   case ARM::t2LDRD_POST:
7774     if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
7775                          /*Writeback*/ true, MnemonicOpsEndInd))
7776       return true;
7777     break;
7778   case ARM::t2BXJ: {
7779     const unsigned RmReg = Inst.getOperand(0).getReg();
7780     // Rm = SP is no longer unpredictable in v8-A
7781     if (RmReg == ARM::SP && !hasV8Ops())
7782       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7783                    "r13 (SP) is an unpredictable operand to BXJ");
7784     return false;
7785   }
7786   case ARM::STRD:
7787     if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
7788                          /*Writeback*/ false, MnemonicOpsEndInd))
7789       return true;
7790     break;
7791   case ARM::STRD_PRE:
7792   case ARM::STRD_POST:
7793     if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
7794                          /*Writeback*/ true, MnemonicOpsEndInd))
7795       return true;
7796     break;
7797   case ARM::t2STRD_PRE:
7798   case ARM::t2STRD_POST:
7799     if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ false,
7800                          /*Writeback*/ true, MnemonicOpsEndInd))
7801       return true;
7802     break;
7803   case ARM::STR_PRE_IMM:
7804   case ARM::STR_PRE_REG:
7805   case ARM::t2STR_PRE:
7806   case ARM::STR_POST_IMM:
7807   case ARM::STR_POST_REG:
7808   case ARM::t2STR_POST:
7809   case ARM::STRH_PRE:
7810   case ARM::t2STRH_PRE:
7811   case ARM::STRH_POST:
7812   case ARM::t2STRH_POST:
7813   case ARM::STRB_PRE_IMM:
7814   case ARM::STRB_PRE_REG:
7815   case ARM::t2STRB_PRE:
7816   case ARM::STRB_POST_IMM:
7817   case ARM::STRB_POST_REG:
7818   case ARM::t2STRB_POST: {
7819     // Rt must be different from Rn.
7820     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7821     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7822 
7823     if (Rt == Rn)
7824       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
7825                    "source register and base register can't be identical");
7826     return false;
7827   }
7828   case ARM::t2LDR_PRE_imm:
7829   case ARM::t2LDR_POST_imm:
7830   case ARM::t2STR_PRE_imm:
7831   case ARM::t2STR_POST_imm: {
7832     // Rt must be different from Rn.
7833     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7834     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7835 
7836     if (Rt == Rn)
7837       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7838                    "destination register and base register can't be identical");
7839     if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7840         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7841       int Imm = Inst.getOperand(2).getImm();
7842       if (Imm > 255 || Imm < -255)
7843         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7844                      "operand must be in range [-255, 255]");
7845     }
7846     if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7847         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7848       if (Inst.getOperand(0).getReg() == ARM::PC) {
7849         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7850                      "operand must be a register in range [r0, r14]");
7851       }
7852     }
7853     return false;
7854   }
7855 
7856   case ARM::t2LDRB_OFFSET_imm:
7857   case ARM::t2LDRB_PRE_imm:
7858   case ARM::t2LDRB_POST_imm:
7859   case ARM::t2STRB_OFFSET_imm:
7860   case ARM::t2STRB_PRE_imm:
7861   case ARM::t2STRB_POST_imm: {
7862     if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7863         Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7864         Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7865         Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7866       int Imm = Inst.getOperand(2).getImm();
7867       if (Imm > 255 || Imm < -255)
7868         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7869                      "operand must be in range [-255, 255]");
7870     } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7871                Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7872       int Imm = Inst.getOperand(2).getImm();
7873       if (Imm > 0 || Imm < -255)
7874         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7875                      "operand must be in range [0, 255] with a negative sign");
7876     }
7877     if (Inst.getOperand(0).getReg() == ARM::PC) {
7878       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7879                    "if operand is PC, should call the LDRB (literal)");
7880     }
7881     return false;
7882   }
7883 
7884   case ARM::t2LDRH_OFFSET_imm:
7885   case ARM::t2LDRH_PRE_imm:
7886   case ARM::t2LDRH_POST_imm:
7887   case ARM::t2STRH_OFFSET_imm:
7888   case ARM::t2STRH_PRE_imm:
7889   case ARM::t2STRH_POST_imm: {
7890     if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7891         Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7892         Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7893         Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7894       int Imm = Inst.getOperand(2).getImm();
7895       if (Imm > 255 || Imm < -255)
7896         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7897                      "operand must be in range [-255, 255]");
7898     } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7899                Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7900       int Imm = Inst.getOperand(2).getImm();
7901       if (Imm > 0 || Imm < -255)
7902         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7903                      "operand must be in range [0, 255] with a negative sign");
7904     }
7905     if (Inst.getOperand(0).getReg() == ARM::PC) {
7906       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7907                    "if operand is PC, should call the LDRH (literal)");
7908     }
7909     return false;
7910   }
7911 
7912   case ARM::t2LDRSB_OFFSET_imm:
7913   case ARM::t2LDRSB_PRE_imm:
7914   case ARM::t2LDRSB_POST_imm: {
7915     if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7916         Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7917       int Imm = Inst.getOperand(2).getImm();
7918       if (Imm > 255 || Imm < -255)
7919         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7920                      "operand must be in range [-255, 255]");
7921     } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7922       int Imm = Inst.getOperand(2).getImm();
7923       if (Imm > 0 || Imm < -255)
7924         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7925                      "operand must be in range [0, 255] with a negative sign");
7926     }
7927     if (Inst.getOperand(0).getReg() == ARM::PC) {
7928       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7929                    "if operand is PC, should call the LDRH (literal)");
7930     }
7931     return false;
7932   }
7933 
7934   case ARM::t2LDRSH_OFFSET_imm:
7935   case ARM::t2LDRSH_PRE_imm:
7936   case ARM::t2LDRSH_POST_imm: {
7937     if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7938         Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7939       int Imm = Inst.getOperand(2).getImm();
7940       if (Imm > 255 || Imm < -255)
7941         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7942                      "operand must be in range [-255, 255]");
7943     } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7944       int Imm = Inst.getOperand(2).getImm();
7945       if (Imm > 0 || Imm < -255)
7946         return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7947                      "operand must be in range [0, 255] with a negative sign");
7948     }
7949     if (Inst.getOperand(0).getReg() == ARM::PC) {
7950       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7951                    "if operand is PC, should call the LDRH (literal)");
7952     }
7953     return false;
7954   }
7955 
7956   case ARM::LDR_PRE_IMM:
7957   case ARM::LDR_PRE_REG:
7958   case ARM::t2LDR_PRE:
7959   case ARM::LDR_POST_IMM:
7960   case ARM::LDR_POST_REG:
7961   case ARM::t2LDR_POST:
7962   case ARM::LDRH_PRE:
7963   case ARM::t2LDRH_PRE:
7964   case ARM::LDRH_POST:
7965   case ARM::t2LDRH_POST:
7966   case ARM::LDRSH_PRE:
7967   case ARM::t2LDRSH_PRE:
7968   case ARM::LDRSH_POST:
7969   case ARM::t2LDRSH_POST:
7970   case ARM::LDRB_PRE_IMM:
7971   case ARM::LDRB_PRE_REG:
7972   case ARM::t2LDRB_PRE:
7973   case ARM::LDRB_POST_IMM:
7974   case ARM::LDRB_POST_REG:
7975   case ARM::t2LDRB_POST:
7976   case ARM::LDRSB_PRE:
7977   case ARM::t2LDRSB_PRE:
7978   case ARM::LDRSB_POST:
7979   case ARM::t2LDRSB_POST: {
7980     // Rt must be different from Rn.
7981     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7982     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7983 
7984     if (Rt == Rn)
7985       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7986                    "destination register and base register can't be identical");
7987     return false;
7988   }
7989 
7990   case ARM::MVE_VLDRBU8_rq:
7991   case ARM::MVE_VLDRBU16_rq:
7992   case ARM::MVE_VLDRBS16_rq:
7993   case ARM::MVE_VLDRBU32_rq:
7994   case ARM::MVE_VLDRBS32_rq:
7995   case ARM::MVE_VLDRHU16_rq:
7996   case ARM::MVE_VLDRHU16_rq_u:
7997   case ARM::MVE_VLDRHU32_rq:
7998   case ARM::MVE_VLDRHU32_rq_u:
7999   case ARM::MVE_VLDRHS32_rq:
8000   case ARM::MVE_VLDRHS32_rq_u:
8001   case ARM::MVE_VLDRWU32_rq:
8002   case ARM::MVE_VLDRWU32_rq_u:
8003   case ARM::MVE_VLDRDU64_rq:
8004   case ARM::MVE_VLDRDU64_rq_u:
8005   case ARM::MVE_VLDRWU32_qi:
8006   case ARM::MVE_VLDRWU32_qi_pre:
8007   case ARM::MVE_VLDRDU64_qi:
8008   case ARM::MVE_VLDRDU64_qi_pre: {
8009     // Qd must be different from Qm.
8010     unsigned QdIdx = 0, QmIdx = 2;
8011     bool QmIsPointer = false;
8012     switch (Opcode) {
8013     case ARM::MVE_VLDRWU32_qi:
8014     case ARM::MVE_VLDRDU64_qi:
8015       QmIdx = 1;
8016       QmIsPointer = true;
8017       break;
8018     case ARM::MVE_VLDRWU32_qi_pre:
8019     case ARM::MVE_VLDRDU64_qi_pre:
8020       QdIdx = 1;
8021       QmIsPointer = true;
8022       break;
8023     }
8024 
8025     const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
8026     const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
8027 
8028     if (Qd == Qm) {
8029       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8030                    Twine("destination vector register and vector ") +
8031                        (QmIsPointer ? "pointer" : "offset") +
8032                        " register can't be identical");
8033     }
8034     return false;
8035   }
8036 
8037   case ARM::SBFX:
8038   case ARM::t2SBFX:
8039   case ARM::UBFX:
8040   case ARM::t2UBFX: {
8041     // Width must be in range [1, 32-lsb].
8042     unsigned LSB = Inst.getOperand(2).getImm();
8043     unsigned Widthm1 = Inst.getOperand(3).getImm();
8044     if (Widthm1 >= 32 - LSB)
8045       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8046                    "bitfield width must be in range [1,32-lsb]");
8047     return false;
8048   }
8049   // Notionally handles ARM::tLDMIA_UPD too.
8050   case ARM::tLDMIA: {
8051     // If we're parsing Thumb2, the .w variant is available and handles
8052     // most cases that are normally illegal for a Thumb1 LDM instruction.
8053     // We'll make the transformation in processInstruction() if necessary.
8054     //
8055     // Thumb LDM instructions are writeback iff the base register is not
8056     // in the register list.
8057     unsigned Rn = Inst.getOperand(0).getReg();
8058     bool HasWritebackToken =
8059         (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8060              .isToken() &&
8061          static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8062                  .getToken() == "!");
8063 
8064     bool ListContainsBase;
8065     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
8066       return Error(
8067           Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8068           "registers must be in range r0-r7");
8069     // If we should have writeback, then there should be a '!' token.
8070     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8071       return Error(
8072           Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8073           "writeback operator '!' expected");
8074     // If we should not have writeback, there must not be a '!'. This is
8075     // true even for the 32-bit wide encodings.
8076     if (ListContainsBase && HasWritebackToken)
8077       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8078                    "writeback operator '!' not allowed when base register "
8079                    "in register list");
8080 
8081     if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8082       return true;
8083     break;
8084   }
8085   case ARM::LDMIA_UPD:
8086   case ARM::LDMDB_UPD:
8087   case ARM::LDMIB_UPD:
8088   case ARM::LDMDA_UPD:
8089     // ARM variants loading and updating the same register are only officially
8090     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
8091     if (!hasV7Ops())
8092       break;
8093     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
8094       return Error(Operands.back()->getStartLoc(),
8095                    "writeback register not allowed in register list");
8096     break;
8097   case ARM::t2LDMIA:
8098   case ARM::t2LDMDB:
8099     if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8100       return true;
8101     break;
8102   case ARM::t2STMIA:
8103   case ARM::t2STMDB:
8104     if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8105       return true;
8106     break;
8107   case ARM::t2LDMIA_UPD:
8108   case ARM::t2LDMDB_UPD:
8109   case ARM::t2STMIA_UPD:
8110   case ARM::t2STMDB_UPD:
8111     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
8112       return Error(Operands.back()->getStartLoc(),
8113                    "writeback register not allowed in register list");
8114 
8115     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8116       if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8117         return true;
8118     } else {
8119       if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8120         return true;
8121     }
8122     break;
8123 
8124   case ARM::sysLDMIA_UPD:
8125   case ARM::sysLDMDA_UPD:
8126   case ARM::sysLDMDB_UPD:
8127   case ARM::sysLDMIB_UPD:
8128     if (!listContainsReg(Inst, 3, ARM::PC))
8129       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8130                    "writeback register only allowed on system LDM "
8131                    "if PC in register-list");
8132     break;
8133   case ARM::sysSTMIA_UPD:
8134   case ARM::sysSTMDA_UPD:
8135   case ARM::sysSTMDB_UPD:
8136   case ARM::sysSTMIB_UPD:
8137     return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8138                  "system STM cannot have writeback register");
8139   // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
8140   // so only issue a diagnostic for thumb1. The instructions will be
8141   // switched to the t2 encodings in processInstruction() if necessary.
8142   case ARM::tPOP: {
8143     bool ListContainsBase;
8144     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
8145         !isThumbTwo())
8146       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8147                    "registers must be in range r0-r7 or pc");
8148     if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 2, !isMClass()))
8149       return true;
8150     break;
8151   }
8152   case ARM::tPUSH: {
8153     bool ListContainsBase;
8154     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
8155         !isThumbTwo())
8156       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8157                    "registers must be in range r0-r7 or lr");
8158     if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 2))
8159       return true;
8160     break;
8161   }
8162   case ARM::tSTMIA_UPD: {
8163     bool ListContainsBase, InvalidLowList;
8164     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
8165                                           0, ListContainsBase);
8166     if (InvalidLowList && !isThumbTwo())
8167       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8168                    "registers must be in range r0-r7");
8169 
8170     // This would be converted to a 32-bit stm, but that's not valid if the
8171     // writeback register is in the list.
8172     if (InvalidLowList && ListContainsBase)
8173       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8174                    "writeback operator '!' not allowed when base register "
8175                    "in register list");
8176 
8177     if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 4))
8178       return true;
8179     break;
8180   }
8181   case ARM::tADDrSP:
8182     // If the non-SP source operand and the destination operand are not the
8183     // same, we need thumb2 (for the wide encoding), or we have an error.
8184     if (!isThumbTwo() &&
8185         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8186       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8187                    "source register must be the same as destination");
8188     }
8189     break;
8190 
8191   case ARM::t2ADDrr:
8192   case ARM::t2ADDrs:
8193   case ARM::t2SUBrr:
8194   case ARM::t2SUBrs:
8195     if (Inst.getOperand(0).getReg() == ARM::SP &&
8196         Inst.getOperand(1).getReg() != ARM::SP)
8197       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8198                    "source register must be sp if destination is sp");
8199     break;
8200 
8201   // Final range checking for Thumb unconditional branch instructions.
8202   case ARM::tB:
8203     if (!(static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]))
8204              .isSignedOffset<11, 1>())
8205       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8206                    "branch target out of range");
8207     break;
8208   case ARM::t2B: {
8209     int op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8210                                                     : MnemonicOpsEndInd + 1;
8211     ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
8212     // Delay the checks of symbolic expressions until they are resolved.
8213     if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8214         !Operand.isSignedOffset<24, 1>())
8215       return Error(Operands[op]->getStartLoc(), "branch target out of range");
8216     break;
8217   }
8218   // Final range checking for Thumb conditional branch instructions.
8219   case ARM::tBcc:
8220     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8221              .isSignedOffset<8, 1>())
8222       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8223                    "branch target out of range");
8224     break;
8225   case ARM::t2Bcc: {
8226     int Op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8227                                                     : MnemonicOpsEndInd + 1;
8228     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8229       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8230     break;
8231   }
8232   case ARM::tCBZ:
8233   case ARM::tCBNZ: {
8234     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8235              .isUnsignedOffset<6, 1>())
8236       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8237                    "branch target out of range");
8238     break;
8239   }
8240   case ARM::MOVi16:
8241   case ARM::MOVTi16:
8242   case ARM::t2MOVi16:
8243   case ARM::t2MOVTi16:
8244     {
8245     // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
8246     // especially when we turn it into a movw and the expression <symbol> does
8247     // not have a :lower16: or :upper16 as part of the expression.  We don't
8248     // want the behavior of silently truncating, which can be unexpected and
8249     // lead to bugs that are difficult to find since this is an easy mistake
8250     // to make.
8251     int i = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8252                                                    : MnemonicOpsEndInd + 1;
8253     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8254     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8255     if (CE) break;
8256     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
8257     if (!E) break;
8258     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8259     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
8260                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
8261       return Error(
8262           Op.getStartLoc(),
8263           "immediate expression for mov requires :lower16: or :upper16");
8264     break;
8265   }
8266   case ARM::tADDi8: {
8267     MCParsedAsmOperand &Op = *Operands[MnemonicOpsEndInd + 1];
8268     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8269       return Error(Op.getStartLoc(),
8270                    "Immediate expression for Thumb adds requires :lower0_7:,"
8271                    " :lower8_15:, :upper0_7: or :upper8_15:");
8272     break;
8273   }
8274   case ARM::tMOVi8: {
8275     MCParsedAsmOperand &Op = *Operands[MnemonicOpsEndInd];
8276     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8277       return Error(Op.getStartLoc(),
8278                    "Immediate expression for Thumb movs requires :lower0_7:,"
8279                    " :lower8_15:, :upper0_7: or :upper8_15:");
8280     break;
8281   }
8282   case ARM::HINT:
8283   case ARM::t2HINT: {
8284     unsigned Imm8 = Inst.getOperand(0).getImm();
8285     unsigned Pred = Inst.getOperand(1).getImm();
8286     // ESB is not predicable (pred must be AL). Without the RAS extension, this
8287     // behaves as any other unallocated hint.
8288     if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8289       return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
8290                                                "predicable, but condition "
8291                                                "code specified");
8292     if (Imm8 == 0x14 && Pred != ARMCC::AL)
8293       return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
8294                                                "predicable, but condition "
8295                                                "code specified");
8296     break;
8297   }
8298   case ARM::t2BFi:
8299   case ARM::t2BFr:
8300   case ARM::t2BFLi:
8301   case ARM::t2BFLr: {
8302     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8303              .isUnsignedOffset<4, 1>() ||
8304         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0)) {
8305       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8306                    "branch location out of range or not a multiple of 2");
8307     }
8308 
8309     if (Opcode == ARM::t2BFi) {
8310       if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8311                .isSignedOffset<16, 1>())
8312         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8313                      "branch target out of range or not a multiple of 2");
8314     } else if (Opcode == ARM::t2BFLi) {
8315       if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8316                .isSignedOffset<18, 1>())
8317         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8318                      "branch target out of range or not a multiple of 2");
8319     }
8320     break;
8321   }
8322   case ARM::t2BFic: {
8323     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8324              .isUnsignedOffset<4, 1>() ||
8325         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8326       return Error(Operands[1]->getStartLoc(),
8327                    "branch location out of range or not a multiple of 2");
8328 
8329     if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8330              .isSignedOffset<16, 1>())
8331       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8332                    "branch target out of range or not a multiple of 2");
8333 
8334     assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8335            "branch location and else branch target should either both be "
8336            "immediates or both labels");
8337 
8338     if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8339       int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8340       if (Diff != 4 && Diff != 2)
8341         return Error(
8342             Operands[3]->getStartLoc(),
8343             "else branch target must be 2 or 4 greater than the branch location");
8344     }
8345     break;
8346   }
8347   case ARM::t2CLRM: {
8348     for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8349       if (Inst.getOperand(i).isReg() &&
8350           !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8351               Inst.getOperand(i).getReg())) {
8352         return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8353                      "invalid register in register list. Valid registers are "
8354                      "r0-r12, lr/r14 and APSR.");
8355       }
8356     }
8357     break;
8358   }
8359   case ARM::DSB:
8360   case ARM::t2DSB: {
8361 
8362     if (Inst.getNumOperands() < 2)
8363       break;
8364 
8365     unsigned Option = Inst.getOperand(0).getImm();
8366     unsigned Pred = Inst.getOperand(1).getImm();
8367 
8368     // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8369     if (Option == 0 && Pred != ARMCC::AL)
8370       return Error(Operands[1]->getStartLoc(),
8371                    "instruction 'ssbb' is not predicable, but condition code "
8372                    "specified");
8373     if (Option == 4 && Pred != ARMCC::AL)
8374       return Error(Operands[1]->getStartLoc(),
8375                    "instruction 'pssbb' is not predicable, but condition code "
8376                    "specified");
8377     break;
8378   }
8379   case ARM::VMOVRRS: {
8380     // Source registers must be sequential.
8381     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8382     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8383     if (Sm1 != Sm + 1)
8384       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8385                    "source operands must be sequential");
8386     break;
8387   }
8388   case ARM::VMOVSRR: {
8389     // Destination registers must be sequential.
8390     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8391     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8392     if (Sm1 != Sm + 1)
8393       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8394                    "destination operands must be sequential");
8395     break;
8396   }
8397   case ARM::VLDMDIA:
8398   case ARM::VSTMDIA: {
8399     ARMOperand &Op =
8400         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
8401     auto &RegList = Op.getRegList();
8402     if (RegList.size() < 1 || RegList.size() > 16)
8403       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8404                    "list of registers must be at least 1 and at most 16");
8405     break;
8406   }
8407   case ARM::MVE_VQDMULLs32bh:
8408   case ARM::MVE_VQDMULLs32th:
8409   case ARM::MVE_VCMULf32:
8410   case ARM::MVE_VMULLBs32:
8411   case ARM::MVE_VMULLTs32:
8412   case ARM::MVE_VMULLBu32:
8413   case ARM::MVE_VMULLTu32: {
8414     if (Operands[MnemonicOpsEndInd]->getReg() ==
8415         Operands[MnemonicOpsEndInd + 1]->getReg()) {
8416       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8417                    "Qd register and Qn register can't be identical");
8418     }
8419     if (Operands[MnemonicOpsEndInd]->getReg() ==
8420         Operands[MnemonicOpsEndInd + 2]->getReg()) {
8421       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8422                    "Qd register and Qm register can't be identical");
8423     }
8424     break;
8425   }
8426   case ARM::MVE_VREV64_8:
8427   case ARM::MVE_VREV64_16:
8428   case ARM::MVE_VREV64_32:
8429   case ARM::MVE_VQDMULL_qr_s32bh:
8430   case ARM::MVE_VQDMULL_qr_s32th: {
8431     if (Operands[MnemonicOpsEndInd]->getReg() ==
8432         Operands[MnemonicOpsEndInd + 1]->getReg()) {
8433       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8434                    "Qd register and Qn register can't be identical");
8435     }
8436     break;
8437   }
8438   case ARM::MVE_VCADDi32:
8439   case ARM::MVE_VCADDf32:
8440   case ARM::MVE_VHCADDs32: {
8441     if (Operands[MnemonicOpsEndInd]->getReg() ==
8442         Operands[MnemonicOpsEndInd + 2]->getReg()) {
8443       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8444                    "Qd register and Qm register can't be identical");
8445     }
8446     break;
8447   }
8448   case ARM::MVE_VMOV_rr_q: {
8449     if (Operands[MnemonicOpsEndInd + 2]->getReg() !=
8450         Operands[MnemonicOpsEndInd + 4]->getReg())
8451       return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8452                    "Q-registers must be the same");
8453     if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
8454             .getVectorIndex() !=
8455         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 5])
8456                 .getVectorIndex() +
8457             2)
8458       return Error(Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
8459                    "Q-register indexes must be 2 and 0 or 3 and 1");
8460     break;
8461   }
8462   case ARM::MVE_VMOV_q_rr: {
8463     if (Operands[MnemonicOpsEndInd]->getReg() !=
8464         Operands[MnemonicOpsEndInd + 2]->getReg())
8465       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8466                    "Q-registers must be the same");
8467     if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8468             .getVectorIndex() !=
8469         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
8470                 .getVectorIndex() +
8471             2)
8472       return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8473                    "Q-register indexes must be 2 and 0 or 3 and 1");
8474     break;
8475   }
8476   case ARM::MVE_SQRSHR:
8477   case ARM::MVE_UQRSHL: {
8478     if (Operands[MnemonicOpsEndInd]->getReg() ==
8479         Operands[MnemonicOpsEndInd + 1]->getReg()) {
8480       return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8481                    "Rda register and Rm register can't be identical");
8482     }
8483     break;
8484   }
8485   case ARM::UMAAL:
8486   case ARM::UMLAL:
8487   case ARM::UMULL:
8488   case ARM::t2UMAAL:
8489   case ARM::t2UMLAL:
8490   case ARM::t2UMULL:
8491   case ARM::SMLAL:
8492   case ARM::SMLALBB:
8493   case ARM::SMLALBT:
8494   case ARM::SMLALD:
8495   case ARM::SMLALDX:
8496   case ARM::SMLALTB:
8497   case ARM::SMLALTT:
8498   case ARM::SMLSLD:
8499   case ARM::SMLSLDX:
8500   case ARM::SMULL:
8501   case ARM::t2SMLAL:
8502   case ARM::t2SMLALBB:
8503   case ARM::t2SMLALBT:
8504   case ARM::t2SMLALD:
8505   case ARM::t2SMLALDX:
8506   case ARM::t2SMLALTB:
8507   case ARM::t2SMLALTT:
8508   case ARM::t2SMLSLD:
8509   case ARM::t2SMLSLDX:
8510   case ARM::t2SMULL: {
8511     unsigned RdHi = Inst.getOperand(0).getReg();
8512     unsigned RdLo = Inst.getOperand(1).getReg();
8513     if(RdHi == RdLo) {
8514       return Error(Loc,
8515                    "unpredictable instruction, RdHi and RdLo must be different");
8516     }
8517     break;
8518   }
8519 
8520   case ARM::CDE_CX1:
8521   case ARM::CDE_CX1A:
8522   case ARM::CDE_CX1D:
8523   case ARM::CDE_CX1DA:
8524   case ARM::CDE_CX2:
8525   case ARM::CDE_CX2A:
8526   case ARM::CDE_CX2D:
8527   case ARM::CDE_CX2DA:
8528   case ARM::CDE_CX3:
8529   case ARM::CDE_CX3A:
8530   case ARM::CDE_CX3D:
8531   case ARM::CDE_CX3DA:
8532   case ARM::CDE_VCX1_vec:
8533   case ARM::CDE_VCX1_fpsp:
8534   case ARM::CDE_VCX1_fpdp:
8535   case ARM::CDE_VCX1A_vec:
8536   case ARM::CDE_VCX1A_fpsp:
8537   case ARM::CDE_VCX1A_fpdp:
8538   case ARM::CDE_VCX2_vec:
8539   case ARM::CDE_VCX2_fpsp:
8540   case ARM::CDE_VCX2_fpdp:
8541   case ARM::CDE_VCX2A_vec:
8542   case ARM::CDE_VCX2A_fpsp:
8543   case ARM::CDE_VCX2A_fpdp:
8544   case ARM::CDE_VCX3_vec:
8545   case ARM::CDE_VCX3_fpsp:
8546   case ARM::CDE_VCX3_fpdp:
8547   case ARM::CDE_VCX3A_vec:
8548   case ARM::CDE_VCX3A_fpsp:
8549   case ARM::CDE_VCX3A_fpdp: {
8550     assert(Inst.getOperand(1).isImm() &&
8551            "CDE operand 1 must be a coprocessor ID");
8552     int64_t Coproc = Inst.getOperand(1).getImm();
8553     if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8554       return Error(Operands[1]->getStartLoc(),
8555                    "coprocessor must be configured as CDE");
8556     else if (Coproc >= 8)
8557       return Error(Operands[1]->getStartLoc(),
8558                    "coprocessor must be in the range [p0, p7]");
8559     break;
8560   }
8561 
8562   case ARM::t2CDP:
8563   case ARM::t2CDP2:
8564   case ARM::t2LDC2L_OFFSET:
8565   case ARM::t2LDC2L_OPTION:
8566   case ARM::t2LDC2L_POST:
8567   case ARM::t2LDC2L_PRE:
8568   case ARM::t2LDC2_OFFSET:
8569   case ARM::t2LDC2_OPTION:
8570   case ARM::t2LDC2_POST:
8571   case ARM::t2LDC2_PRE:
8572   case ARM::t2LDCL_OFFSET:
8573   case ARM::t2LDCL_OPTION:
8574   case ARM::t2LDCL_POST:
8575   case ARM::t2LDCL_PRE:
8576   case ARM::t2LDC_OFFSET:
8577   case ARM::t2LDC_OPTION:
8578   case ARM::t2LDC_POST:
8579   case ARM::t2LDC_PRE:
8580   case ARM::t2MCR:
8581   case ARM::t2MCR2:
8582   case ARM::t2MCRR:
8583   case ARM::t2MCRR2:
8584   case ARM::t2MRC:
8585   case ARM::t2MRC2:
8586   case ARM::t2MRRC:
8587   case ARM::t2MRRC2:
8588   case ARM::t2STC2L_OFFSET:
8589   case ARM::t2STC2L_OPTION:
8590   case ARM::t2STC2L_POST:
8591   case ARM::t2STC2L_PRE:
8592   case ARM::t2STC2_OFFSET:
8593   case ARM::t2STC2_OPTION:
8594   case ARM::t2STC2_POST:
8595   case ARM::t2STC2_PRE:
8596   case ARM::t2STCL_OFFSET:
8597   case ARM::t2STCL_OPTION:
8598   case ARM::t2STCL_POST:
8599   case ARM::t2STCL_PRE:
8600   case ARM::t2STC_OFFSET:
8601   case ARM::t2STC_OPTION:
8602   case ARM::t2STC_POST:
8603   case ARM::t2STC_PRE: {
8604     unsigned Opcode = Inst.getOpcode();
8605     // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags,
8606     // CopInd is the index of the coprocessor operand.
8607     size_t CopInd = 0;
8608     if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8609       CopInd = 2;
8610     else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8611       CopInd = 1;
8612     assert(Inst.getOperand(CopInd).isImm() &&
8613            "Operand must be a coprocessor ID");
8614     int64_t Coproc = Inst.getOperand(CopInd).getImm();
8615     // Operands[2] is the coprocessor operand at syntactic level
8616     if (ARM::isCDECoproc(Coproc, *STI))
8617       return Error(Operands[2]->getStartLoc(),
8618                    "coprocessor must be configured as GCP");
8619     break;
8620   }
8621   }
8622 
8623   return false;
8624 }
8625 
getRealVSTOpcode(unsigned Opc,unsigned & Spacing)8626 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
8627   switch(Opc) {
8628   default: llvm_unreachable("unexpected opcode!");
8629   // VST1LN
8630   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
8631   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8632   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8633   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
8634   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8635   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8636   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
8637   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
8638   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
8639 
8640   // VST2LN
8641   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
8642   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8643   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8644   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8645   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8646 
8647   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
8648   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8649   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8650   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8651   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8652 
8653   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
8654   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
8655   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
8656   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
8657   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
8658 
8659   // VST3LN
8660   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
8661   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8662   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8663   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
8664   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8665   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
8666   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8667   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8668   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
8669   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8670   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
8671   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
8672   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
8673   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
8674   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
8675 
8676   // VST3
8677   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
8678   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8679   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8680   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
8681   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8682   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8683   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
8684   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8685   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8686   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
8687   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8688   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8689   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
8690   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
8691   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
8692   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
8693   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
8694   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
8695 
8696   // VST4LN
8697   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
8698   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8699   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8700   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
8701   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8702   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
8703   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8704   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8705   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
8706   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8707   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
8708   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
8709   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
8710   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
8711   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
8712 
8713   // VST4
8714   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
8715   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8716   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8717   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
8718   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8719   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8720   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
8721   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8722   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8723   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
8724   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8725   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8726   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
8727   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
8728   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
8729   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
8730   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
8731   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
8732   }
8733 }
8734 
getRealVLDOpcode(unsigned Opc,unsigned & Spacing)8735 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
8736   switch(Opc) {
8737   default: llvm_unreachable("unexpected opcode!");
8738   // VLD1LN
8739   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
8740   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8741   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8742   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
8743   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8744   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8745   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
8746   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
8747   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
8748 
8749   // VLD2LN
8750   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
8751   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8752   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8753   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
8754   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8755   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
8756   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8757   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8758   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
8759   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8760   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
8761   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
8762   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
8763   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
8764   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
8765 
8766   // VLD3DUP
8767   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
8768   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8769   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8770   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
8771   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8772   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8773   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
8774   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8775   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8776   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
8777   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8778   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8779   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
8780   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
8781   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
8782   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
8783   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
8784   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
8785 
8786   // VLD3LN
8787   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
8788   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8789   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8790   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
8791   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8792   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
8793   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8794   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8795   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
8796   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8797   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
8798   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
8799   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
8800   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
8801   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
8802 
8803   // VLD3
8804   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
8805   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8806   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8807   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
8808   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8809   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8810   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
8811   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8812   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8813   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
8814   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8815   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8816   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
8817   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
8818   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
8819   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
8820   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
8821   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
8822 
8823   // VLD4LN
8824   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
8825   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8826   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8827   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8828   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8829   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
8830   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8831   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8832   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8833   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8834   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
8835   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
8836   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
8837   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
8838   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
8839 
8840   // VLD4DUP
8841   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
8842   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8843   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8844   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
8845   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
8846   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8847   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
8848   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8849   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8850   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
8851   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
8852   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8853   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
8854   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
8855   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
8856   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
8857   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
8858   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
8859 
8860   // VLD4
8861   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
8862   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8863   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8864   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
8865   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8866   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8867   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
8868   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8869   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8870   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
8871   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8872   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8873   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
8874   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
8875   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
8876   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
8877   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
8878   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
8879   }
8880 }
8881 
processInstruction(MCInst & Inst,const OperandVector & Operands,unsigned MnemonicOpsEndInd,MCStreamer & Out)8882 bool ARMAsmParser::processInstruction(MCInst &Inst,
8883                                       const OperandVector &Operands,
8884                                       unsigned MnemonicOpsEndInd,
8885                                       MCStreamer &Out) {
8886   // Check if we have the wide qualifier, because if it's present we
8887   // must avoid selecting a 16-bit thumb instruction.
8888   bool HasWideQualifier = false;
8889   for (auto &Op : Operands) {
8890     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8891     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8892       HasWideQualifier = true;
8893       break;
8894     }
8895   }
8896 
8897   switch (Inst.getOpcode()) {
8898   case ARM::VLLDM:
8899   case ARM::VLSTM: {
8900     // In some cases both T1 and T2 are valid, causing tablegen pick T1 instead
8901     // of T2
8902     if (Operands.size() ==
8903         MnemonicOpsEndInd + 2) { // a register list has been provided
8904       ARMOperand &Op = static_cast<ARMOperand &>(
8905           *Operands[MnemonicOpsEndInd + 1]); // the register list, a dpr_reglist
8906       assert(Op.isDPRRegList());
8907       auto &RegList = Op.getRegList();
8908       // When the register list is {d0-d31} the instruction has to be the T2
8909       // variant
8910       if (RegList.size() == 32) {
8911         const unsigned Opcode =
8912             (Inst.getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8913         MCInst TmpInst;
8914         TmpInst.setOpcode(Opcode);
8915         TmpInst.addOperand(Inst.getOperand(0));
8916         TmpInst.addOperand(Inst.getOperand(1));
8917         TmpInst.addOperand(Inst.getOperand(2));
8918         TmpInst.addOperand(Inst.getOperand(3));
8919         Inst = TmpInst;
8920         return true;
8921       }
8922     }
8923     return false;
8924   }
8925   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8926   case ARM::LDRT_POST:
8927   case ARM::LDRBT_POST: {
8928     const unsigned Opcode =
8929       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8930                                            : ARM::LDRBT_POST_IMM;
8931     MCInst TmpInst;
8932     TmpInst.setOpcode(Opcode);
8933     TmpInst.addOperand(Inst.getOperand(0));
8934     TmpInst.addOperand(Inst.getOperand(1));
8935     TmpInst.addOperand(Inst.getOperand(1));
8936     TmpInst.addOperand(MCOperand::createReg(0));
8937     TmpInst.addOperand(MCOperand::createImm(0));
8938     TmpInst.addOperand(Inst.getOperand(2));
8939     TmpInst.addOperand(Inst.getOperand(3));
8940     Inst = TmpInst;
8941     return true;
8942   }
8943   // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for ommitted immediate.
8944   case ARM::LDRSBTii:
8945   case ARM::LDRHTii:
8946   case ARM::LDRSHTii: {
8947     MCInst TmpInst;
8948 
8949     if (Inst.getOpcode() == ARM::LDRSBTii)
8950       TmpInst.setOpcode(ARM::LDRSBTi);
8951     else if (Inst.getOpcode() == ARM::LDRHTii)
8952       TmpInst.setOpcode(ARM::LDRHTi);
8953     else if (Inst.getOpcode() == ARM::LDRSHTii)
8954       TmpInst.setOpcode(ARM::LDRSHTi);
8955     TmpInst.addOperand(Inst.getOperand(0));
8956     TmpInst.addOperand(Inst.getOperand(1));
8957     TmpInst.addOperand(Inst.getOperand(1));
8958     TmpInst.addOperand(MCOperand::createImm(256));
8959     TmpInst.addOperand(Inst.getOperand(2));
8960     Inst = TmpInst;
8961     return true;
8962   }
8963   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8964   case ARM::STRT_POST:
8965   case ARM::STRBT_POST: {
8966     const unsigned Opcode =
8967       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8968                                            : ARM::STRBT_POST_IMM;
8969     MCInst TmpInst;
8970     TmpInst.setOpcode(Opcode);
8971     TmpInst.addOperand(Inst.getOperand(1));
8972     TmpInst.addOperand(Inst.getOperand(0));
8973     TmpInst.addOperand(Inst.getOperand(1));
8974     TmpInst.addOperand(MCOperand::createReg(0));
8975     TmpInst.addOperand(MCOperand::createImm(0));
8976     TmpInst.addOperand(Inst.getOperand(2));
8977     TmpInst.addOperand(Inst.getOperand(3));
8978     Inst = TmpInst;
8979     return true;
8980   }
8981   // Alias for alternate form of 'ADR Rd, #imm' instruction.
8982   case ARM::ADDri: {
8983     if (Inst.getOperand(1).getReg() != ARM::PC ||
8984         Inst.getOperand(5).getReg() != 0 ||
8985         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
8986       return false;
8987     MCInst TmpInst;
8988     TmpInst.setOpcode(ARM::ADR);
8989     TmpInst.addOperand(Inst.getOperand(0));
8990     if (Inst.getOperand(2).isImm()) {
8991       // Immediate (mod_imm) will be in its encoded form, we must unencode it
8992       // before passing it to the ADR instruction.
8993       unsigned Enc = Inst.getOperand(2).getImm();
8994       TmpInst.addOperand(MCOperand::createImm(
8995           llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8996     } else {
8997       // Turn PC-relative expression into absolute expression.
8998       // Reading PC provides the start of the current instruction + 8 and
8999       // the transform to adr is biased by that.
9000       MCSymbol *Dot = getContext().createTempSymbol();
9001       Out.emitLabel(Dot);
9002       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
9003       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
9004                                                      MCSymbolRefExpr::VK_None,
9005                                                      getContext());
9006       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
9007       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
9008                                                      getContext());
9009       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
9010                                                         getContext());
9011       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
9012     }
9013     TmpInst.addOperand(Inst.getOperand(3));
9014     TmpInst.addOperand(Inst.getOperand(4));
9015     Inst = TmpInst;
9016     return true;
9017   }
9018   // Aliases for imm syntax of LDR instructions.
9019   case ARM::t2LDR_PRE_imm:
9020   case ARM::t2LDR_POST_imm: {
9021     MCInst TmpInst;
9022     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
9023                                                              : ARM::t2LDR_POST);
9024     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9025     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9026     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9027     TmpInst.addOperand(Inst.getOperand(2)); // imm
9028     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9029     Inst = TmpInst;
9030     return true;
9031   }
9032   // Aliases for imm syntax of STR instructions.
9033   case ARM::t2STR_PRE_imm:
9034   case ARM::t2STR_POST_imm: {
9035     MCInst TmpInst;
9036     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
9037                                                              : ARM::t2STR_POST);
9038     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9039     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9040     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9041     TmpInst.addOperand(Inst.getOperand(2)); // imm
9042     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9043     Inst = TmpInst;
9044     return true;
9045   }
9046   // Aliases for imm syntax of LDRB instructions.
9047   case ARM::t2LDRB_OFFSET_imm: {
9048     MCInst TmpInst;
9049     TmpInst.setOpcode(ARM::t2LDRBi8);
9050     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9051     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9052     TmpInst.addOperand(Inst.getOperand(2)); // imm
9053     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9054     Inst = TmpInst;
9055     return true;
9056   }
9057   case ARM::t2LDRB_PRE_imm:
9058   case ARM::t2LDRB_POST_imm: {
9059     MCInst TmpInst;
9060     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
9061                           ? ARM::t2LDRB_PRE
9062                           : ARM::t2LDRB_POST);
9063     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9064     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9065     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9066     TmpInst.addOperand(Inst.getOperand(2)); // imm
9067     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9068     Inst = TmpInst;
9069     return true;
9070   }
9071   // Aliases for imm syntax of STRB instructions.
9072   case ARM::t2STRB_OFFSET_imm: {
9073     MCInst TmpInst;
9074     TmpInst.setOpcode(ARM::t2STRBi8);
9075     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9076     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9077     TmpInst.addOperand(Inst.getOperand(2)); // imm
9078     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9079     Inst = TmpInst;
9080     return true;
9081   }
9082   case ARM::t2STRB_PRE_imm:
9083   case ARM::t2STRB_POST_imm: {
9084     MCInst TmpInst;
9085     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
9086                           ? ARM::t2STRB_PRE
9087                           : ARM::t2STRB_POST);
9088     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9089     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9090     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9091     TmpInst.addOperand(Inst.getOperand(2)); // imm
9092     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9093     Inst = TmpInst;
9094     return true;
9095   }
9096   // Aliases for imm syntax of LDRH instructions.
9097   case ARM::t2LDRH_OFFSET_imm: {
9098     MCInst TmpInst;
9099     TmpInst.setOpcode(ARM::t2LDRHi8);
9100     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9101     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9102     TmpInst.addOperand(Inst.getOperand(2)); // imm
9103     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9104     Inst = TmpInst;
9105     return true;
9106   }
9107   case ARM::t2LDRH_PRE_imm:
9108   case ARM::t2LDRH_POST_imm: {
9109     MCInst TmpInst;
9110     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
9111                           ? ARM::t2LDRH_PRE
9112                           : ARM::t2LDRH_POST);
9113     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9114     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9115     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9116     TmpInst.addOperand(Inst.getOperand(2)); // imm
9117     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9118     Inst = TmpInst;
9119     return true;
9120   }
9121   // Aliases for imm syntax of STRH instructions.
9122   case ARM::t2STRH_OFFSET_imm: {
9123     MCInst TmpInst;
9124     TmpInst.setOpcode(ARM::t2STRHi8);
9125     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9126     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9127     TmpInst.addOperand(Inst.getOperand(2)); // imm
9128     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9129     Inst = TmpInst;
9130     return true;
9131   }
9132   case ARM::t2STRH_PRE_imm:
9133   case ARM::t2STRH_POST_imm: {
9134     MCInst TmpInst;
9135     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
9136                           ? ARM::t2STRH_PRE
9137                           : ARM::t2STRH_POST);
9138     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9139     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9140     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9141     TmpInst.addOperand(Inst.getOperand(2)); // imm
9142     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9143     Inst = TmpInst;
9144     return true;
9145   }
9146   // Aliases for imm syntax of LDRSB instructions.
9147   case ARM::t2LDRSB_OFFSET_imm: {
9148     MCInst TmpInst;
9149     TmpInst.setOpcode(ARM::t2LDRSBi8);
9150     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9151     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9152     TmpInst.addOperand(Inst.getOperand(2)); // imm
9153     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9154     Inst = TmpInst;
9155     return true;
9156   }
9157   case ARM::t2LDRSB_PRE_imm:
9158   case ARM::t2LDRSB_POST_imm: {
9159     MCInst TmpInst;
9160     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
9161                           ? ARM::t2LDRSB_PRE
9162                           : ARM::t2LDRSB_POST);
9163     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9164     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9165     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9166     TmpInst.addOperand(Inst.getOperand(2)); // imm
9167     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9168     Inst = TmpInst;
9169     return true;
9170   }
9171   // Aliases for imm syntax of LDRSH instructions.
9172   case ARM::t2LDRSH_OFFSET_imm: {
9173     MCInst TmpInst;
9174     TmpInst.setOpcode(ARM::t2LDRSHi8);
9175     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9176     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9177     TmpInst.addOperand(Inst.getOperand(2)); // imm
9178     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9179     Inst = TmpInst;
9180     return true;
9181   }
9182   case ARM::t2LDRSH_PRE_imm:
9183   case ARM::t2LDRSH_POST_imm: {
9184     MCInst TmpInst;
9185     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
9186                           ? ARM::t2LDRSH_PRE
9187                           : ARM::t2LDRSH_POST);
9188     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9189     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9190     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9191     TmpInst.addOperand(Inst.getOperand(2)); // imm
9192     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9193     Inst = TmpInst;
9194     return true;
9195   }
9196   // Aliases for alternate PC+imm syntax of LDR instructions.
9197   case ARM::t2LDRpcrel:
9198     // Select the narrow version if the immediate will fit.
9199     if (Inst.getOperand(1).getImm() > 0 &&
9200         Inst.getOperand(1).getImm() <= 0xff &&
9201         !HasWideQualifier)
9202       Inst.setOpcode(ARM::tLDRpci);
9203     else
9204       Inst.setOpcode(ARM::t2LDRpci);
9205     return true;
9206   case ARM::t2LDRBpcrel:
9207     Inst.setOpcode(ARM::t2LDRBpci);
9208     return true;
9209   case ARM::t2LDRHpcrel:
9210     Inst.setOpcode(ARM::t2LDRHpci);
9211     return true;
9212   case ARM::t2LDRSBpcrel:
9213     Inst.setOpcode(ARM::t2LDRSBpci);
9214     return true;
9215   case ARM::t2LDRSHpcrel:
9216     Inst.setOpcode(ARM::t2LDRSHpci);
9217     return true;
9218   case ARM::LDRConstPool:
9219   case ARM::tLDRConstPool:
9220   case ARM::t2LDRConstPool: {
9221     // Pseudo instruction ldr rt, =immediate is converted to a
9222     // MOV rt, immediate if immediate is known and representable
9223     // otherwise we create a constant pool entry that we load from.
9224     MCInst TmpInst;
9225     if (Inst.getOpcode() == ARM::LDRConstPool)
9226       TmpInst.setOpcode(ARM::LDRi12);
9227     else if (Inst.getOpcode() == ARM::tLDRConstPool)
9228       TmpInst.setOpcode(ARM::tLDRpci);
9229     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
9230       TmpInst.setOpcode(ARM::t2LDRpci);
9231     const ARMOperand &PoolOperand =
9232         static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
9233     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9234     // If SubExprVal is a constant we may be able to use a MOV
9235     if (isa<MCConstantExpr>(SubExprVal) &&
9236         Inst.getOperand(0).getReg() != ARM::PC &&
9237         Inst.getOperand(0).getReg() != ARM::SP) {
9238       int64_t Value =
9239         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9240       bool UseMov  = true;
9241       bool MovHasS = true;
9242       if (Inst.getOpcode() == ARM::LDRConstPool) {
9243         // ARM Constant
9244         if (ARM_AM::getSOImmVal(Value) != -1) {
9245           Value = ARM_AM::getSOImmVal(Value);
9246           TmpInst.setOpcode(ARM::MOVi);
9247         }
9248         else if (ARM_AM::getSOImmVal(~Value) != -1) {
9249           Value = ARM_AM::getSOImmVal(~Value);
9250           TmpInst.setOpcode(ARM::MVNi);
9251         }
9252         else if (hasV6T2Ops() &&
9253                  Value >=0 && Value < 65536) {
9254           TmpInst.setOpcode(ARM::MOVi16);
9255           MovHasS = false;
9256         }
9257         else
9258           UseMov = false;
9259       }
9260       else {
9261         // Thumb/Thumb2 Constant
9262         if (hasThumb2() &&
9263             ARM_AM::getT2SOImmVal(Value) != -1)
9264           TmpInst.setOpcode(ARM::t2MOVi);
9265         else if (hasThumb2() &&
9266                  ARM_AM::getT2SOImmVal(~Value) != -1) {
9267           TmpInst.setOpcode(ARM::t2MVNi);
9268           Value = ~Value;
9269         }
9270         else if (hasV8MBaseline() &&
9271                  Value >=0 && Value < 65536) {
9272           TmpInst.setOpcode(ARM::t2MOVi16);
9273           MovHasS = false;
9274         }
9275         else
9276           UseMov = false;
9277       }
9278       if (UseMov) {
9279         TmpInst.addOperand(Inst.getOperand(0));           // Rt
9280         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
9281         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
9282         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
9283         if (MovHasS)
9284           TmpInst.addOperand(MCOperand::createReg(0));    // S
9285         Inst = TmpInst;
9286         return true;
9287       }
9288     }
9289     // No opportunity to use MOV/MVN create constant pool
9290     const MCExpr *CPLoc =
9291       getTargetStreamer().addConstantPoolEntry(SubExprVal,
9292                                                PoolOperand.getStartLoc());
9293     TmpInst.addOperand(Inst.getOperand(0));           // Rt
9294     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
9295     if (TmpInst.getOpcode() == ARM::LDRi12)
9296       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
9297     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
9298     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
9299     Inst = TmpInst;
9300     return true;
9301   }
9302   // Handle NEON VST complex aliases.
9303   case ARM::VST1LNdWB_register_Asm_8:
9304   case ARM::VST1LNdWB_register_Asm_16:
9305   case ARM::VST1LNdWB_register_Asm_32: {
9306     MCInst TmpInst;
9307     // Shuffle the operands around so the lane index operand is in the
9308     // right place.
9309     unsigned Spacing;
9310     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9311     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9312     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9313     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9314     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9315     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9316     TmpInst.addOperand(Inst.getOperand(1)); // lane
9317     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9318     TmpInst.addOperand(Inst.getOperand(6));
9319     Inst = TmpInst;
9320     return true;
9321   }
9322 
9323   case ARM::VST2LNdWB_register_Asm_8:
9324   case ARM::VST2LNdWB_register_Asm_16:
9325   case ARM::VST2LNdWB_register_Asm_32:
9326   case ARM::VST2LNqWB_register_Asm_16:
9327   case ARM::VST2LNqWB_register_Asm_32: {
9328     MCInst TmpInst;
9329     // Shuffle the operands around so the lane index operand is in the
9330     // right place.
9331     unsigned Spacing;
9332     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9333     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9334     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9335     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9336     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9337     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9338     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9339                                             Spacing));
9340     TmpInst.addOperand(Inst.getOperand(1)); // lane
9341     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9342     TmpInst.addOperand(Inst.getOperand(6));
9343     Inst = TmpInst;
9344     return true;
9345   }
9346 
9347   case ARM::VST3LNdWB_register_Asm_8:
9348   case ARM::VST3LNdWB_register_Asm_16:
9349   case ARM::VST3LNdWB_register_Asm_32:
9350   case ARM::VST3LNqWB_register_Asm_16:
9351   case ARM::VST3LNqWB_register_Asm_32: {
9352     MCInst TmpInst;
9353     // Shuffle the operands around so the lane index operand is in the
9354     // right place.
9355     unsigned Spacing;
9356     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9357     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9358     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9359     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9360     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9361     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9362     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9363                                             Spacing));
9364     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9365                                             Spacing * 2));
9366     TmpInst.addOperand(Inst.getOperand(1)); // lane
9367     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9368     TmpInst.addOperand(Inst.getOperand(6));
9369     Inst = TmpInst;
9370     return true;
9371   }
9372 
9373   case ARM::VST4LNdWB_register_Asm_8:
9374   case ARM::VST4LNdWB_register_Asm_16:
9375   case ARM::VST4LNdWB_register_Asm_32:
9376   case ARM::VST4LNqWB_register_Asm_16:
9377   case ARM::VST4LNqWB_register_Asm_32: {
9378     MCInst TmpInst;
9379     // Shuffle the operands around so the lane index operand is in the
9380     // right place.
9381     unsigned Spacing;
9382     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9383     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9384     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9385     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9386     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9387     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9388     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9389                                             Spacing));
9390     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9391                                             Spacing * 2));
9392     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9393                                             Spacing * 3));
9394     TmpInst.addOperand(Inst.getOperand(1)); // lane
9395     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9396     TmpInst.addOperand(Inst.getOperand(6));
9397     Inst = TmpInst;
9398     return true;
9399   }
9400 
9401   case ARM::VST1LNdWB_fixed_Asm_8:
9402   case ARM::VST1LNdWB_fixed_Asm_16:
9403   case ARM::VST1LNdWB_fixed_Asm_32: {
9404     MCInst TmpInst;
9405     // Shuffle the operands around so the lane index operand is in the
9406     // right place.
9407     unsigned Spacing;
9408     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9409     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9410     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9411     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9412     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9413     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9414     TmpInst.addOperand(Inst.getOperand(1)); // lane
9415     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9416     TmpInst.addOperand(Inst.getOperand(5));
9417     Inst = TmpInst;
9418     return true;
9419   }
9420 
9421   case ARM::VST2LNdWB_fixed_Asm_8:
9422   case ARM::VST2LNdWB_fixed_Asm_16:
9423   case ARM::VST2LNdWB_fixed_Asm_32:
9424   case ARM::VST2LNqWB_fixed_Asm_16:
9425   case ARM::VST2LNqWB_fixed_Asm_32: {
9426     MCInst TmpInst;
9427     // Shuffle the operands around so the lane index operand is in the
9428     // right place.
9429     unsigned Spacing;
9430     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9431     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9432     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9433     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9434     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9435     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9436     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9437                                             Spacing));
9438     TmpInst.addOperand(Inst.getOperand(1)); // lane
9439     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9440     TmpInst.addOperand(Inst.getOperand(5));
9441     Inst = TmpInst;
9442     return true;
9443   }
9444 
9445   case ARM::VST3LNdWB_fixed_Asm_8:
9446   case ARM::VST3LNdWB_fixed_Asm_16:
9447   case ARM::VST3LNdWB_fixed_Asm_32:
9448   case ARM::VST3LNqWB_fixed_Asm_16:
9449   case ARM::VST3LNqWB_fixed_Asm_32: {
9450     MCInst TmpInst;
9451     // Shuffle the operands around so the lane index operand is in the
9452     // right place.
9453     unsigned Spacing;
9454     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9455     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9456     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9457     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9458     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9459     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9460     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9461                                             Spacing));
9462     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9463                                             Spacing * 2));
9464     TmpInst.addOperand(Inst.getOperand(1)); // lane
9465     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9466     TmpInst.addOperand(Inst.getOperand(5));
9467     Inst = TmpInst;
9468     return true;
9469   }
9470 
9471   case ARM::VST4LNdWB_fixed_Asm_8:
9472   case ARM::VST4LNdWB_fixed_Asm_16:
9473   case ARM::VST4LNdWB_fixed_Asm_32:
9474   case ARM::VST4LNqWB_fixed_Asm_16:
9475   case ARM::VST4LNqWB_fixed_Asm_32: {
9476     MCInst TmpInst;
9477     // Shuffle the operands around so the lane index operand is in the
9478     // right place.
9479     unsigned Spacing;
9480     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9481     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9482     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9483     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9484     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9485     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9486     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9487                                             Spacing));
9488     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9489                                             Spacing * 2));
9490     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9491                                             Spacing * 3));
9492     TmpInst.addOperand(Inst.getOperand(1)); // lane
9493     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9494     TmpInst.addOperand(Inst.getOperand(5));
9495     Inst = TmpInst;
9496     return true;
9497   }
9498 
9499   case ARM::VST1LNdAsm_8:
9500   case ARM::VST1LNdAsm_16:
9501   case ARM::VST1LNdAsm_32: {
9502     MCInst TmpInst;
9503     // Shuffle the operands around so the lane index operand is in the
9504     // right place.
9505     unsigned Spacing;
9506     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9507     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9508     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9509     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9510     TmpInst.addOperand(Inst.getOperand(1)); // lane
9511     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9512     TmpInst.addOperand(Inst.getOperand(5));
9513     Inst = TmpInst;
9514     return true;
9515   }
9516 
9517   case ARM::VST2LNdAsm_8:
9518   case ARM::VST2LNdAsm_16:
9519   case ARM::VST2LNdAsm_32:
9520   case ARM::VST2LNqAsm_16:
9521   case ARM::VST2LNqAsm_32: {
9522     MCInst TmpInst;
9523     // Shuffle the operands around so the lane index operand is in the
9524     // right place.
9525     unsigned Spacing;
9526     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9527     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9528     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9529     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9530     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9531                                             Spacing));
9532     TmpInst.addOperand(Inst.getOperand(1)); // lane
9533     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9534     TmpInst.addOperand(Inst.getOperand(5));
9535     Inst = TmpInst;
9536     return true;
9537   }
9538 
9539   case ARM::VST3LNdAsm_8:
9540   case ARM::VST3LNdAsm_16:
9541   case ARM::VST3LNdAsm_32:
9542   case ARM::VST3LNqAsm_16:
9543   case ARM::VST3LNqAsm_32: {
9544     MCInst TmpInst;
9545     // Shuffle the operands around so the lane index operand is in the
9546     // right place.
9547     unsigned Spacing;
9548     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9549     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9550     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9551     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9552     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9553                                             Spacing));
9554     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9555                                             Spacing * 2));
9556     TmpInst.addOperand(Inst.getOperand(1)); // lane
9557     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9558     TmpInst.addOperand(Inst.getOperand(5));
9559     Inst = TmpInst;
9560     return true;
9561   }
9562 
9563   case ARM::VST4LNdAsm_8:
9564   case ARM::VST4LNdAsm_16:
9565   case ARM::VST4LNdAsm_32:
9566   case ARM::VST4LNqAsm_16:
9567   case ARM::VST4LNqAsm_32: {
9568     MCInst TmpInst;
9569     // Shuffle the operands around so the lane index operand is in the
9570     // right place.
9571     unsigned Spacing;
9572     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9573     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9574     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9575     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9576     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9577                                             Spacing));
9578     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9579                                             Spacing * 2));
9580     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9581                                             Spacing * 3));
9582     TmpInst.addOperand(Inst.getOperand(1)); // lane
9583     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9584     TmpInst.addOperand(Inst.getOperand(5));
9585     Inst = TmpInst;
9586     return true;
9587   }
9588 
9589   // Handle NEON VLD complex aliases.
9590   case ARM::VLD1LNdWB_register_Asm_8:
9591   case ARM::VLD1LNdWB_register_Asm_16:
9592   case ARM::VLD1LNdWB_register_Asm_32: {
9593     MCInst TmpInst;
9594     // Shuffle the operands around so the lane index operand is in the
9595     // right place.
9596     unsigned Spacing;
9597     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9598     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9599     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9600     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9601     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9602     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9603     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9604     TmpInst.addOperand(Inst.getOperand(1)); // lane
9605     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9606     TmpInst.addOperand(Inst.getOperand(6));
9607     Inst = TmpInst;
9608     return true;
9609   }
9610 
9611   case ARM::VLD2LNdWB_register_Asm_8:
9612   case ARM::VLD2LNdWB_register_Asm_16:
9613   case ARM::VLD2LNdWB_register_Asm_32:
9614   case ARM::VLD2LNqWB_register_Asm_16:
9615   case ARM::VLD2LNqWB_register_Asm_32: {
9616     MCInst TmpInst;
9617     // Shuffle the operands around so the lane index operand is in the
9618     // right place.
9619     unsigned Spacing;
9620     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9621     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9622     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9623                                             Spacing));
9624     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9625     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9626     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9627     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9628     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9629     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9630                                             Spacing));
9631     TmpInst.addOperand(Inst.getOperand(1)); // lane
9632     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9633     TmpInst.addOperand(Inst.getOperand(6));
9634     Inst = TmpInst;
9635     return true;
9636   }
9637 
9638   case ARM::VLD3LNdWB_register_Asm_8:
9639   case ARM::VLD3LNdWB_register_Asm_16:
9640   case ARM::VLD3LNdWB_register_Asm_32:
9641   case ARM::VLD3LNqWB_register_Asm_16:
9642   case ARM::VLD3LNqWB_register_Asm_32: {
9643     MCInst TmpInst;
9644     // Shuffle the operands around so the lane index operand is in the
9645     // right place.
9646     unsigned Spacing;
9647     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9648     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9649     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9650                                             Spacing));
9651     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9652                                             Spacing * 2));
9653     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9654     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9655     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9656     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9657     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9658     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9659                                             Spacing));
9660     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9661                                             Spacing * 2));
9662     TmpInst.addOperand(Inst.getOperand(1)); // lane
9663     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9664     TmpInst.addOperand(Inst.getOperand(6));
9665     Inst = TmpInst;
9666     return true;
9667   }
9668 
9669   case ARM::VLD4LNdWB_register_Asm_8:
9670   case ARM::VLD4LNdWB_register_Asm_16:
9671   case ARM::VLD4LNdWB_register_Asm_32:
9672   case ARM::VLD4LNqWB_register_Asm_16:
9673   case ARM::VLD4LNqWB_register_Asm_32: {
9674     MCInst TmpInst;
9675     // Shuffle the operands around so the lane index operand is in the
9676     // right place.
9677     unsigned Spacing;
9678     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9679     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9680     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9681                                             Spacing));
9682     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9683                                             Spacing * 2));
9684     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9685                                             Spacing * 3));
9686     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9687     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9688     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9689     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9690     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9691     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9692                                             Spacing));
9693     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9694                                             Spacing * 2));
9695     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9696                                             Spacing * 3));
9697     TmpInst.addOperand(Inst.getOperand(1)); // lane
9698     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9699     TmpInst.addOperand(Inst.getOperand(6));
9700     Inst = TmpInst;
9701     return true;
9702   }
9703 
9704   case ARM::VLD1LNdWB_fixed_Asm_8:
9705   case ARM::VLD1LNdWB_fixed_Asm_16:
9706   case ARM::VLD1LNdWB_fixed_Asm_32: {
9707     MCInst TmpInst;
9708     // Shuffle the operands around so the lane index operand is in the
9709     // right place.
9710     unsigned Spacing;
9711     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9712     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9713     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9714     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9715     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9716     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9717     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9718     TmpInst.addOperand(Inst.getOperand(1)); // lane
9719     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9720     TmpInst.addOperand(Inst.getOperand(5));
9721     Inst = TmpInst;
9722     return true;
9723   }
9724 
9725   case ARM::VLD2LNdWB_fixed_Asm_8:
9726   case ARM::VLD2LNdWB_fixed_Asm_16:
9727   case ARM::VLD2LNdWB_fixed_Asm_32:
9728   case ARM::VLD2LNqWB_fixed_Asm_16:
9729   case ARM::VLD2LNqWB_fixed_Asm_32: {
9730     MCInst TmpInst;
9731     // Shuffle the operands around so the lane index operand is in the
9732     // right place.
9733     unsigned Spacing;
9734     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9735     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9736     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9737                                             Spacing));
9738     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9739     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9740     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9741     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9742     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9743     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9744                                             Spacing));
9745     TmpInst.addOperand(Inst.getOperand(1)); // lane
9746     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9747     TmpInst.addOperand(Inst.getOperand(5));
9748     Inst = TmpInst;
9749     return true;
9750   }
9751 
9752   case ARM::VLD3LNdWB_fixed_Asm_8:
9753   case ARM::VLD3LNdWB_fixed_Asm_16:
9754   case ARM::VLD3LNdWB_fixed_Asm_32:
9755   case ARM::VLD3LNqWB_fixed_Asm_16:
9756   case ARM::VLD3LNqWB_fixed_Asm_32: {
9757     MCInst TmpInst;
9758     // Shuffle the operands around so the lane index operand is in the
9759     // right place.
9760     unsigned Spacing;
9761     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9762     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9763     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9764                                             Spacing));
9765     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9766                                             Spacing * 2));
9767     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9768     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9769     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9770     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9771     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9772     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9773                                             Spacing));
9774     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9775                                             Spacing * 2));
9776     TmpInst.addOperand(Inst.getOperand(1)); // lane
9777     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9778     TmpInst.addOperand(Inst.getOperand(5));
9779     Inst = TmpInst;
9780     return true;
9781   }
9782 
9783   case ARM::VLD4LNdWB_fixed_Asm_8:
9784   case ARM::VLD4LNdWB_fixed_Asm_16:
9785   case ARM::VLD4LNdWB_fixed_Asm_32:
9786   case ARM::VLD4LNqWB_fixed_Asm_16:
9787   case ARM::VLD4LNqWB_fixed_Asm_32: {
9788     MCInst TmpInst;
9789     // Shuffle the operands around so the lane index operand is in the
9790     // right place.
9791     unsigned Spacing;
9792     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9793     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9794     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9795                                             Spacing));
9796     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9797                                             Spacing * 2));
9798     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9799                                             Spacing * 3));
9800     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9801     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9802     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9803     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9804     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9805     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9806                                             Spacing));
9807     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9808                                             Spacing * 2));
9809     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9810                                             Spacing * 3));
9811     TmpInst.addOperand(Inst.getOperand(1)); // lane
9812     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9813     TmpInst.addOperand(Inst.getOperand(5));
9814     Inst = TmpInst;
9815     return true;
9816   }
9817 
9818   case ARM::VLD1LNdAsm_8:
9819   case ARM::VLD1LNdAsm_16:
9820   case ARM::VLD1LNdAsm_32: {
9821     MCInst TmpInst;
9822     // Shuffle the operands around so the lane index operand is in the
9823     // right place.
9824     unsigned Spacing;
9825     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9826     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9827     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9828     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9829     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9830     TmpInst.addOperand(Inst.getOperand(1)); // lane
9831     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9832     TmpInst.addOperand(Inst.getOperand(5));
9833     Inst = TmpInst;
9834     return true;
9835   }
9836 
9837   case ARM::VLD2LNdAsm_8:
9838   case ARM::VLD2LNdAsm_16:
9839   case ARM::VLD2LNdAsm_32:
9840   case ARM::VLD2LNqAsm_16:
9841   case ARM::VLD2LNqAsm_32: {
9842     MCInst TmpInst;
9843     // Shuffle the operands around so the lane index operand is in the
9844     // right place.
9845     unsigned Spacing;
9846     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9847     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9848     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9849                                             Spacing));
9850     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9851     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9852     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9853     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9854                                             Spacing));
9855     TmpInst.addOperand(Inst.getOperand(1)); // lane
9856     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9857     TmpInst.addOperand(Inst.getOperand(5));
9858     Inst = TmpInst;
9859     return true;
9860   }
9861 
9862   case ARM::VLD3LNdAsm_8:
9863   case ARM::VLD3LNdAsm_16:
9864   case ARM::VLD3LNdAsm_32:
9865   case ARM::VLD3LNqAsm_16:
9866   case ARM::VLD3LNqAsm_32: {
9867     MCInst TmpInst;
9868     // Shuffle the operands around so the lane index operand is in the
9869     // right place.
9870     unsigned Spacing;
9871     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9872     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9873     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9874                                             Spacing));
9875     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9876                                             Spacing * 2));
9877     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9878     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9879     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9880     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9881                                             Spacing));
9882     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9883                                             Spacing * 2));
9884     TmpInst.addOperand(Inst.getOperand(1)); // lane
9885     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9886     TmpInst.addOperand(Inst.getOperand(5));
9887     Inst = TmpInst;
9888     return true;
9889   }
9890 
9891   case ARM::VLD4LNdAsm_8:
9892   case ARM::VLD4LNdAsm_16:
9893   case ARM::VLD4LNdAsm_32:
9894   case ARM::VLD4LNqAsm_16:
9895   case ARM::VLD4LNqAsm_32: {
9896     MCInst TmpInst;
9897     // Shuffle the operands around so the lane index operand is in the
9898     // right place.
9899     unsigned Spacing;
9900     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9901     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9902     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9903                                             Spacing));
9904     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9905                                             Spacing * 2));
9906     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9907                                             Spacing * 3));
9908     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9909     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9910     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9911     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9912                                             Spacing));
9913     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9914                                             Spacing * 2));
9915     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9916                                             Spacing * 3));
9917     TmpInst.addOperand(Inst.getOperand(1)); // lane
9918     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9919     TmpInst.addOperand(Inst.getOperand(5));
9920     Inst = TmpInst;
9921     return true;
9922   }
9923 
9924   // VLD3DUP single 3-element structure to all lanes instructions.
9925   case ARM::VLD3DUPdAsm_8:
9926   case ARM::VLD3DUPdAsm_16:
9927   case ARM::VLD3DUPdAsm_32:
9928   case ARM::VLD3DUPqAsm_8:
9929   case ARM::VLD3DUPqAsm_16:
9930   case ARM::VLD3DUPqAsm_32: {
9931     MCInst TmpInst;
9932     unsigned Spacing;
9933     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9934     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9935     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9936                                             Spacing));
9937     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9938                                             Spacing * 2));
9939     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9940     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9941     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9942     TmpInst.addOperand(Inst.getOperand(4));
9943     Inst = TmpInst;
9944     return true;
9945   }
9946 
9947   case ARM::VLD3DUPdWB_fixed_Asm_8:
9948   case ARM::VLD3DUPdWB_fixed_Asm_16:
9949   case ARM::VLD3DUPdWB_fixed_Asm_32:
9950   case ARM::VLD3DUPqWB_fixed_Asm_8:
9951   case ARM::VLD3DUPqWB_fixed_Asm_16:
9952   case ARM::VLD3DUPqWB_fixed_Asm_32: {
9953     MCInst TmpInst;
9954     unsigned Spacing;
9955     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9956     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9957     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9958                                             Spacing));
9959     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9960                                             Spacing * 2));
9961     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9962     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9963     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9964     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9965     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9966     TmpInst.addOperand(Inst.getOperand(4));
9967     Inst = TmpInst;
9968     return true;
9969   }
9970 
9971   case ARM::VLD3DUPdWB_register_Asm_8:
9972   case ARM::VLD3DUPdWB_register_Asm_16:
9973   case ARM::VLD3DUPdWB_register_Asm_32:
9974   case ARM::VLD3DUPqWB_register_Asm_8:
9975   case ARM::VLD3DUPqWB_register_Asm_16:
9976   case ARM::VLD3DUPqWB_register_Asm_32: {
9977     MCInst TmpInst;
9978     unsigned Spacing;
9979     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9980     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9981     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9982                                             Spacing));
9983     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9984                                             Spacing * 2));
9985     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9986     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9987     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9988     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9989     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9990     TmpInst.addOperand(Inst.getOperand(5));
9991     Inst = TmpInst;
9992     return true;
9993   }
9994 
9995   // VLD3 multiple 3-element structure instructions.
9996   case ARM::VLD3dAsm_8:
9997   case ARM::VLD3dAsm_16:
9998   case ARM::VLD3dAsm_32:
9999   case ARM::VLD3qAsm_8:
10000   case ARM::VLD3qAsm_16:
10001   case ARM::VLD3qAsm_32: {
10002     MCInst TmpInst;
10003     unsigned Spacing;
10004     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10005     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10006     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10007                                             Spacing));
10008     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10009                                             Spacing * 2));
10010     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10011     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10012     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10013     TmpInst.addOperand(Inst.getOperand(4));
10014     Inst = TmpInst;
10015     return true;
10016   }
10017 
10018   case ARM::VLD3dWB_fixed_Asm_8:
10019   case ARM::VLD3dWB_fixed_Asm_16:
10020   case ARM::VLD3dWB_fixed_Asm_32:
10021   case ARM::VLD3qWB_fixed_Asm_8:
10022   case ARM::VLD3qWB_fixed_Asm_16:
10023   case ARM::VLD3qWB_fixed_Asm_32: {
10024     MCInst TmpInst;
10025     unsigned Spacing;
10026     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10027     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10028     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10029                                             Spacing));
10030     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10031                                             Spacing * 2));
10032     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10033     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10034     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10035     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10036     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10037     TmpInst.addOperand(Inst.getOperand(4));
10038     Inst = TmpInst;
10039     return true;
10040   }
10041 
10042   case ARM::VLD3dWB_register_Asm_8:
10043   case ARM::VLD3dWB_register_Asm_16:
10044   case ARM::VLD3dWB_register_Asm_32:
10045   case ARM::VLD3qWB_register_Asm_8:
10046   case ARM::VLD3qWB_register_Asm_16:
10047   case ARM::VLD3qWB_register_Asm_32: {
10048     MCInst TmpInst;
10049     unsigned Spacing;
10050     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10051     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10052     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10053                                             Spacing));
10054     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10055                                             Spacing * 2));
10056     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10057     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10058     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10059     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10060     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10061     TmpInst.addOperand(Inst.getOperand(5));
10062     Inst = TmpInst;
10063     return true;
10064   }
10065 
10066   // VLD4DUP single 3-element structure to all lanes instructions.
10067   case ARM::VLD4DUPdAsm_8:
10068   case ARM::VLD4DUPdAsm_16:
10069   case ARM::VLD4DUPdAsm_32:
10070   case ARM::VLD4DUPqAsm_8:
10071   case ARM::VLD4DUPqAsm_16:
10072   case ARM::VLD4DUPqAsm_32: {
10073     MCInst TmpInst;
10074     unsigned Spacing;
10075     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10076     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10077     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10078                                             Spacing));
10079     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10080                                             Spacing * 2));
10081     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10082                                             Spacing * 3));
10083     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10084     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10085     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10086     TmpInst.addOperand(Inst.getOperand(4));
10087     Inst = TmpInst;
10088     return true;
10089   }
10090 
10091   case ARM::VLD4DUPdWB_fixed_Asm_8:
10092   case ARM::VLD4DUPdWB_fixed_Asm_16:
10093   case ARM::VLD4DUPdWB_fixed_Asm_32:
10094   case ARM::VLD4DUPqWB_fixed_Asm_8:
10095   case ARM::VLD4DUPqWB_fixed_Asm_16:
10096   case ARM::VLD4DUPqWB_fixed_Asm_32: {
10097     MCInst TmpInst;
10098     unsigned Spacing;
10099     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10100     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10101     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10102                                             Spacing));
10103     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10104                                             Spacing * 2));
10105     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10106                                             Spacing * 3));
10107     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10108     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10109     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10110     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10111     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10112     TmpInst.addOperand(Inst.getOperand(4));
10113     Inst = TmpInst;
10114     return true;
10115   }
10116 
10117   case ARM::VLD4DUPdWB_register_Asm_8:
10118   case ARM::VLD4DUPdWB_register_Asm_16:
10119   case ARM::VLD4DUPdWB_register_Asm_32:
10120   case ARM::VLD4DUPqWB_register_Asm_8:
10121   case ARM::VLD4DUPqWB_register_Asm_16:
10122   case ARM::VLD4DUPqWB_register_Asm_32: {
10123     MCInst TmpInst;
10124     unsigned Spacing;
10125     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10126     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10127     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10128                                             Spacing));
10129     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10130                                             Spacing * 2));
10131     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10132                                             Spacing * 3));
10133     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10134     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10135     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10136     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10137     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10138     TmpInst.addOperand(Inst.getOperand(5));
10139     Inst = TmpInst;
10140     return true;
10141   }
10142 
10143   // VLD4 multiple 4-element structure instructions.
10144   case ARM::VLD4dAsm_8:
10145   case ARM::VLD4dAsm_16:
10146   case ARM::VLD4dAsm_32:
10147   case ARM::VLD4qAsm_8:
10148   case ARM::VLD4qAsm_16:
10149   case ARM::VLD4qAsm_32: {
10150     MCInst TmpInst;
10151     unsigned Spacing;
10152     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10153     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10154     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10155                                             Spacing));
10156     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10157                                             Spacing * 2));
10158     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10159                                             Spacing * 3));
10160     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10161     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10162     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10163     TmpInst.addOperand(Inst.getOperand(4));
10164     Inst = TmpInst;
10165     return true;
10166   }
10167 
10168   case ARM::VLD4dWB_fixed_Asm_8:
10169   case ARM::VLD4dWB_fixed_Asm_16:
10170   case ARM::VLD4dWB_fixed_Asm_32:
10171   case ARM::VLD4qWB_fixed_Asm_8:
10172   case ARM::VLD4qWB_fixed_Asm_16:
10173   case ARM::VLD4qWB_fixed_Asm_32: {
10174     MCInst TmpInst;
10175     unsigned Spacing;
10176     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10177     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10178     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10179                                             Spacing));
10180     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10181                                             Spacing * 2));
10182     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10183                                             Spacing * 3));
10184     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10185     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10186     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10187     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10188     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10189     TmpInst.addOperand(Inst.getOperand(4));
10190     Inst = TmpInst;
10191     return true;
10192   }
10193 
10194   case ARM::VLD4dWB_register_Asm_8:
10195   case ARM::VLD4dWB_register_Asm_16:
10196   case ARM::VLD4dWB_register_Asm_32:
10197   case ARM::VLD4qWB_register_Asm_8:
10198   case ARM::VLD4qWB_register_Asm_16:
10199   case ARM::VLD4qWB_register_Asm_32: {
10200     MCInst TmpInst;
10201     unsigned Spacing;
10202     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10203     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10204     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10205                                             Spacing));
10206     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10207                                             Spacing * 2));
10208     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10209                                             Spacing * 3));
10210     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10211     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10212     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10213     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10214     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10215     TmpInst.addOperand(Inst.getOperand(5));
10216     Inst = TmpInst;
10217     return true;
10218   }
10219 
10220   // VST3 multiple 3-element structure instructions.
10221   case ARM::VST3dAsm_8:
10222   case ARM::VST3dAsm_16:
10223   case ARM::VST3dAsm_32:
10224   case ARM::VST3qAsm_8:
10225   case ARM::VST3qAsm_16:
10226   case ARM::VST3qAsm_32: {
10227     MCInst TmpInst;
10228     unsigned Spacing;
10229     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10230     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10231     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10232     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10233     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10234                                             Spacing));
10235     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10236                                             Spacing * 2));
10237     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10238     TmpInst.addOperand(Inst.getOperand(4));
10239     Inst = TmpInst;
10240     return true;
10241   }
10242 
10243   case ARM::VST3dWB_fixed_Asm_8:
10244   case ARM::VST3dWB_fixed_Asm_16:
10245   case ARM::VST3dWB_fixed_Asm_32:
10246   case ARM::VST3qWB_fixed_Asm_8:
10247   case ARM::VST3qWB_fixed_Asm_16:
10248   case ARM::VST3qWB_fixed_Asm_32: {
10249     MCInst TmpInst;
10250     unsigned Spacing;
10251     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10252     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10253     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10254     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10255     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10256     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10257     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10258                                             Spacing));
10259     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10260                                             Spacing * 2));
10261     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10262     TmpInst.addOperand(Inst.getOperand(4));
10263     Inst = TmpInst;
10264     return true;
10265   }
10266 
10267   case ARM::VST3dWB_register_Asm_8:
10268   case ARM::VST3dWB_register_Asm_16:
10269   case ARM::VST3dWB_register_Asm_32:
10270   case ARM::VST3qWB_register_Asm_8:
10271   case ARM::VST3qWB_register_Asm_16:
10272   case ARM::VST3qWB_register_Asm_32: {
10273     MCInst TmpInst;
10274     unsigned Spacing;
10275     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10276     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10277     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10278     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10279     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10280     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10281     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10282                                             Spacing));
10283     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10284                                             Spacing * 2));
10285     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10286     TmpInst.addOperand(Inst.getOperand(5));
10287     Inst = TmpInst;
10288     return true;
10289   }
10290 
10291   // VST4 multiple 3-element structure instructions.
10292   case ARM::VST4dAsm_8:
10293   case ARM::VST4dAsm_16:
10294   case ARM::VST4dAsm_32:
10295   case ARM::VST4qAsm_8:
10296   case ARM::VST4qAsm_16:
10297   case ARM::VST4qAsm_32: {
10298     MCInst TmpInst;
10299     unsigned Spacing;
10300     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10301     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10302     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10303     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10304     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10305                                             Spacing));
10306     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10307                                             Spacing * 2));
10308     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10309                                             Spacing * 3));
10310     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10311     TmpInst.addOperand(Inst.getOperand(4));
10312     Inst = TmpInst;
10313     return true;
10314   }
10315 
10316   case ARM::VST4dWB_fixed_Asm_8:
10317   case ARM::VST4dWB_fixed_Asm_16:
10318   case ARM::VST4dWB_fixed_Asm_32:
10319   case ARM::VST4qWB_fixed_Asm_8:
10320   case ARM::VST4qWB_fixed_Asm_16:
10321   case ARM::VST4qWB_fixed_Asm_32: {
10322     MCInst TmpInst;
10323     unsigned Spacing;
10324     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10325     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10326     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10327     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10328     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10329     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10330     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10331                                             Spacing));
10332     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10333                                             Spacing * 2));
10334     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10335                                             Spacing * 3));
10336     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10337     TmpInst.addOperand(Inst.getOperand(4));
10338     Inst = TmpInst;
10339     return true;
10340   }
10341 
10342   case ARM::VST4dWB_register_Asm_8:
10343   case ARM::VST4dWB_register_Asm_16:
10344   case ARM::VST4dWB_register_Asm_32:
10345   case ARM::VST4qWB_register_Asm_8:
10346   case ARM::VST4qWB_register_Asm_16:
10347   case ARM::VST4qWB_register_Asm_32: {
10348     MCInst TmpInst;
10349     unsigned Spacing;
10350     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10351     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10352     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10353     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10354     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10355     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10356     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10357                                             Spacing));
10358     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10359                                             Spacing * 2));
10360     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10361                                             Spacing * 3));
10362     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10363     TmpInst.addOperand(Inst.getOperand(5));
10364     Inst = TmpInst;
10365     return true;
10366   }
10367 
10368   // Handle encoding choice for the shift-immediate instructions.
10369   case ARM::t2LSLri:
10370   case ARM::t2LSRri:
10371   case ARM::t2ASRri:
10372     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10373         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10374         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10375         !HasWideQualifier) {
10376       unsigned NewOpc;
10377       switch (Inst.getOpcode()) {
10378       default: llvm_unreachable("unexpected opcode");
10379       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
10380       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
10381       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
10382       }
10383       // The Thumb1 operands aren't in the same order. Awesome, eh?
10384       MCInst TmpInst;
10385       TmpInst.setOpcode(NewOpc);
10386       TmpInst.addOperand(Inst.getOperand(0));
10387       TmpInst.addOperand(Inst.getOperand(5));
10388       TmpInst.addOperand(Inst.getOperand(1));
10389       TmpInst.addOperand(Inst.getOperand(2));
10390       TmpInst.addOperand(Inst.getOperand(3));
10391       TmpInst.addOperand(Inst.getOperand(4));
10392       Inst = TmpInst;
10393       return true;
10394     }
10395     return false;
10396 
10397   // Handle the Thumb2 mode MOV complex aliases.
10398   case ARM::t2MOVsr:
10399   case ARM::t2MOVSsr: {
10400     // Which instruction to expand to depends on the CCOut operand and
10401     // whether we're in an IT block if the register operands are low
10402     // registers.
10403     bool isNarrow = false;
10404     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10405         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10406         isARMLowRegister(Inst.getOperand(2).getReg()) &&
10407         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10408         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
10409         !HasWideQualifier)
10410       isNarrow = true;
10411     MCInst TmpInst;
10412     unsigned newOpc;
10413     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
10414     default: llvm_unreachable("unexpected opcode!");
10415     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
10416     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
10417     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
10418     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
10419     }
10420     TmpInst.setOpcode(newOpc);
10421     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10422     if (isNarrow)
10423       TmpInst.addOperand(MCOperand::createReg(
10424           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10425     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10426     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10427     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10428     TmpInst.addOperand(Inst.getOperand(5));
10429     if (!isNarrow)
10430       TmpInst.addOperand(MCOperand::createReg(
10431           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10432     Inst = TmpInst;
10433     return true;
10434   }
10435   case ARM::t2MOVsi:
10436   case ARM::t2MOVSsi: {
10437     // Which instruction to expand to depends on the CCOut operand and
10438     // whether we're in an IT block if the register operands are low
10439     // registers.
10440     bool isNarrow = false;
10441     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10442         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10443         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10444         !HasWideQualifier)
10445       isNarrow = true;
10446     MCInst TmpInst;
10447     unsigned newOpc;
10448     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10449     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
10450     bool isMov = false;
10451     // MOV rd, rm, LSL #0 is actually a MOV instruction
10452     if (Shift == ARM_AM::lsl && Amount == 0) {
10453       isMov = true;
10454       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10455       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10456       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10457       // instead.
10458       if (inITBlock()) {
10459         isNarrow = false;
10460       }
10461       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10462     } else {
10463       switch(Shift) {
10464       default: llvm_unreachable("unexpected opcode!");
10465       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10466       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10467       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10468       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10469       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10470       }
10471     }
10472     if (Amount == 32) Amount = 0;
10473     TmpInst.setOpcode(newOpc);
10474     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10475     if (isNarrow && !isMov)
10476       TmpInst.addOperand(MCOperand::createReg(
10477           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10478     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10479     if (newOpc != ARM::t2RRX && !isMov)
10480       TmpInst.addOperand(MCOperand::createImm(Amount));
10481     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10482     TmpInst.addOperand(Inst.getOperand(4));
10483     if (!isNarrow)
10484       TmpInst.addOperand(MCOperand::createReg(
10485           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10486     Inst = TmpInst;
10487     return true;
10488   }
10489   // Handle the ARM mode MOV complex aliases.
10490   case ARM::ASRr:
10491   case ARM::LSRr:
10492   case ARM::LSLr:
10493   case ARM::RORr: {
10494     ARM_AM::ShiftOpc ShiftTy;
10495     switch(Inst.getOpcode()) {
10496     default: llvm_unreachable("unexpected opcode!");
10497     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10498     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10499     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10500     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10501     }
10502     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
10503     MCInst TmpInst;
10504     TmpInst.setOpcode(ARM::MOVsr);
10505     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10506     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10507     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10508     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10509     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10510     TmpInst.addOperand(Inst.getOperand(4));
10511     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10512     Inst = TmpInst;
10513     return true;
10514   }
10515   case ARM::ASRi:
10516   case ARM::LSRi:
10517   case ARM::LSLi:
10518   case ARM::RORi: {
10519     ARM_AM::ShiftOpc ShiftTy;
10520     switch(Inst.getOpcode()) {
10521     default: llvm_unreachable("unexpected opcode!");
10522     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10523     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10524     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10525     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10526     }
10527     // A shift by zero is a plain MOVr, not a MOVsi.
10528     unsigned Amt = Inst.getOperand(2).getImm();
10529     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10530     // A shift by 32 should be encoded as 0 when permitted
10531     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10532       Amt = 0;
10533     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
10534     MCInst TmpInst;
10535     TmpInst.setOpcode(Opc);
10536     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10537     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10538     if (Opc == ARM::MOVsi)
10539       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10540     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10541     TmpInst.addOperand(Inst.getOperand(4));
10542     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10543     Inst = TmpInst;
10544     return true;
10545   }
10546   case ARM::RRXi: {
10547     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
10548     MCInst TmpInst;
10549     TmpInst.setOpcode(ARM::MOVsi);
10550     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10551     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10552     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10553     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10554     TmpInst.addOperand(Inst.getOperand(3));
10555     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
10556     Inst = TmpInst;
10557     return true;
10558   }
10559   case ARM::t2LDMIA_UPD: {
10560     // If this is a load of a single register, then we should use
10561     // a post-indexed LDR instruction instead, per the ARM ARM.
10562     if (Inst.getNumOperands() != 5)
10563       return false;
10564     MCInst TmpInst;
10565     TmpInst.setOpcode(ARM::t2LDR_POST);
10566     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10567     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10568     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10569     TmpInst.addOperand(MCOperand::createImm(4));
10570     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10571     TmpInst.addOperand(Inst.getOperand(3));
10572     Inst = TmpInst;
10573     return true;
10574   }
10575   case ARM::t2STMDB_UPD: {
10576     // If this is a store of a single register, then we should use
10577     // a pre-indexed STR instruction instead, per the ARM ARM.
10578     if (Inst.getNumOperands() != 5)
10579       return false;
10580     MCInst TmpInst;
10581     TmpInst.setOpcode(ARM::t2STR_PRE);
10582     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10583     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10584     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10585     TmpInst.addOperand(MCOperand::createImm(-4));
10586     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10587     TmpInst.addOperand(Inst.getOperand(3));
10588     Inst = TmpInst;
10589     return true;
10590   }
10591   case ARM::LDMIA_UPD:
10592     // If this is a load of a single register via a 'pop', then we should use
10593     // a post-indexed LDR instruction instead, per the ARM ARM.
10594     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10595         Inst.getNumOperands() == 5) {
10596       MCInst TmpInst;
10597       TmpInst.setOpcode(ARM::LDR_POST_IMM);
10598       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10599       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10600       TmpInst.addOperand(Inst.getOperand(1)); // Rn
10601       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
10602       TmpInst.addOperand(MCOperand::createImm(4));
10603       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10604       TmpInst.addOperand(Inst.getOperand(3));
10605       Inst = TmpInst;
10606       return true;
10607     }
10608     break;
10609   case ARM::STMDB_UPD:
10610     // If this is a store of a single register via a 'push', then we should use
10611     // a pre-indexed STR instruction instead, per the ARM ARM.
10612     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10613         Inst.getNumOperands() == 5) {
10614       MCInst TmpInst;
10615       TmpInst.setOpcode(ARM::STR_PRE_IMM);
10616       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10617       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10618       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10619       TmpInst.addOperand(MCOperand::createImm(-4));
10620       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10621       TmpInst.addOperand(Inst.getOperand(3));
10622       Inst = TmpInst;
10623     }
10624     break;
10625   case ARM::t2ADDri12:
10626   case ARM::t2SUBri12:
10627   case ARM::t2ADDspImm12:
10628   case ARM::t2SUBspImm12: {
10629     // If the immediate fits for encoding T3 and the generic
10630     // mnemonic was used, encoding T3 is preferred.
10631     const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10632     if ((Token != "add" && Token != "sub") ||
10633         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10634       break;
10635     switch (Inst.getOpcode()) {
10636     case ARM::t2ADDri12:
10637       Inst.setOpcode(ARM::t2ADDri);
10638       break;
10639     case ARM::t2SUBri12:
10640       Inst.setOpcode(ARM::t2SUBri);
10641       break;
10642     case ARM::t2ADDspImm12:
10643       Inst.setOpcode(ARM::t2ADDspImm);
10644       break;
10645     case ARM::t2SUBspImm12:
10646       Inst.setOpcode(ARM::t2SUBspImm);
10647       break;
10648     }
10649 
10650     Inst.addOperand(MCOperand::createReg(0)); // cc_out
10651     return true;
10652   }
10653   case ARM::tADDi8:
10654     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10655     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10656     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10657     // to encoding T1 if <Rd> is omitted."
10658     if (Inst.getOperand(3).isImm() &&
10659         (unsigned)Inst.getOperand(3).getImm() < 8 &&
10660         Operands.size() == MnemonicOpsEndInd + 3) {
10661       Inst.setOpcode(ARM::tADDi3);
10662       return true;
10663     }
10664     break;
10665   case ARM::tSUBi8:
10666     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10667     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10668     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10669     // to encoding T1 if <Rd> is omitted."
10670     if ((unsigned)Inst.getOperand(3).getImm() < 8 &&
10671         Operands.size() == MnemonicOpsEndInd + 3) {
10672       Inst.setOpcode(ARM::tSUBi3);
10673       return true;
10674     }
10675     break;
10676   case ARM::t2ADDri:
10677   case ARM::t2SUBri: {
10678     // If the destination and first source operand are the same, and
10679     // the flags are compatible with the current IT status, use encoding T2
10680     // instead of T3. For compatibility with the system 'as'. Make sure the
10681     // wide encoding wasn't explicit.
10682     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
10683         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
10684         (Inst.getOperand(2).isImm() &&
10685          (unsigned)Inst.getOperand(2).getImm() > 255) ||
10686         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
10687         HasWideQualifier)
10688       break;
10689     MCInst TmpInst;
10690     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10691                       ARM::tADDi8 : ARM::tSUBi8);
10692     TmpInst.addOperand(Inst.getOperand(0));
10693     TmpInst.addOperand(Inst.getOperand(5));
10694     TmpInst.addOperand(Inst.getOperand(0));
10695     TmpInst.addOperand(Inst.getOperand(2));
10696     TmpInst.addOperand(Inst.getOperand(3));
10697     TmpInst.addOperand(Inst.getOperand(4));
10698     Inst = TmpInst;
10699     return true;
10700   }
10701   case ARM::t2ADDspImm:
10702   case ARM::t2SUBspImm: {
10703     // Prefer T1 encoding if possible
10704     if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier)
10705       break;
10706     unsigned V = Inst.getOperand(2).getImm();
10707     if (V & 3 || V > ((1 << 7) - 1) << 2)
10708       break;
10709     MCInst TmpInst;
10710     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10711                                                           : ARM::tSUBspi);
10712     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10713     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10714     TmpInst.addOperand(MCOperand::createImm(V / 4));   // immediate
10715     TmpInst.addOperand(Inst.getOperand(3));            // pred
10716     TmpInst.addOperand(Inst.getOperand(4));
10717     Inst = TmpInst;
10718     return true;
10719   }
10720   case ARM::t2ADDrr: {
10721     // If the destination and first source operand are the same, and
10722     // there's no setting of the flags, use encoding T2 instead of T3.
10723     // Note that this is only for ADD, not SUB. This mirrors the system
10724     // 'as' behaviour.  Also take advantage of ADD being commutative.
10725     // Make sure the wide encoding wasn't explicit.
10726     bool Swap = false;
10727     auto DestReg = Inst.getOperand(0).getReg();
10728     bool Transform = DestReg == Inst.getOperand(1).getReg();
10729     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10730       Transform = true;
10731       Swap = true;
10732     }
10733     if (!Transform ||
10734         Inst.getOperand(5).getReg() != 0 ||
10735         HasWideQualifier)
10736       break;
10737     MCInst TmpInst;
10738     TmpInst.setOpcode(ARM::tADDhirr);
10739     TmpInst.addOperand(Inst.getOperand(0));
10740     TmpInst.addOperand(Inst.getOperand(0));
10741     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10742     TmpInst.addOperand(Inst.getOperand(3));
10743     TmpInst.addOperand(Inst.getOperand(4));
10744     Inst = TmpInst;
10745     return true;
10746   }
10747   case ARM::tADDrSP:
10748     // If the non-SP source operand and the destination operand are not the
10749     // same, we need to use the 32-bit encoding if it's available.
10750     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10751       Inst.setOpcode(ARM::t2ADDrr);
10752       Inst.addOperand(MCOperand::createReg(0)); // cc_out
10753       return true;
10754     }
10755     break;
10756   case ARM::tB:
10757     // A Thumb conditional branch outside of an IT block is a tBcc.
10758     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10759       Inst.setOpcode(ARM::tBcc);
10760       return true;
10761     }
10762     break;
10763   case ARM::t2B:
10764     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10765     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10766       Inst.setOpcode(ARM::t2Bcc);
10767       return true;
10768     }
10769     break;
10770   case ARM::t2Bcc:
10771     // If the conditional is AL or we're in an IT block, we really want t2B.
10772     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10773       Inst.setOpcode(ARM::t2B);
10774       return true;
10775     }
10776     break;
10777   case ARM::tBcc:
10778     // If the conditional is AL, we really want tB.
10779     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10780       Inst.setOpcode(ARM::tB);
10781       return true;
10782     }
10783     break;
10784   case ARM::tLDMIA: {
10785     // If the register list contains any high registers, or if the writeback
10786     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10787     // instead if we're in Thumb2. Otherwise, this should have generated
10788     // an error in validateInstruction().
10789     unsigned Rn = Inst.getOperand(0).getReg();
10790     bool hasWritebackToken =
10791         (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
10792              .isToken() &&
10793          static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
10794                  .getToken() == "!");
10795     bool listContainsBase;
10796     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
10797         (!listContainsBase && !hasWritebackToken) ||
10798         (listContainsBase && hasWritebackToken)) {
10799       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10800       assert(isThumbTwo());
10801       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10802       // If we're switching to the updating version, we need to insert
10803       // the writeback tied operand.
10804       if (hasWritebackToken)
10805         Inst.insert(Inst.begin(),
10806                     MCOperand::createReg(Inst.getOperand(0).getReg()));
10807       return true;
10808     }
10809     break;
10810   }
10811   case ARM::tSTMIA_UPD: {
10812     // If the register list contains any high registers, we need to use
10813     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10814     // should have generated an error in validateInstruction().
10815     unsigned Rn = Inst.getOperand(0).getReg();
10816     bool listContainsBase;
10817     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
10818       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10819       assert(isThumbTwo());
10820       Inst.setOpcode(ARM::t2STMIA_UPD);
10821       return true;
10822     }
10823     break;
10824   }
10825   case ARM::tPOP: {
10826     bool listContainsBase;
10827     // If the register list contains any high registers, we need to use
10828     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10829     // should have generated an error in validateInstruction().
10830     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
10831       return false;
10832     assert(isThumbTwo());
10833     Inst.setOpcode(ARM::t2LDMIA_UPD);
10834     // Add the base register and writeback operands.
10835     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10836     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10837     return true;
10838   }
10839   case ARM::tPUSH: {
10840     bool listContainsBase;
10841     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
10842       return false;
10843     assert(isThumbTwo());
10844     Inst.setOpcode(ARM::t2STMDB_UPD);
10845     // Add the base register and writeback operands.
10846     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10847     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10848     return true;
10849   }
10850   case ARM::t2MOVi:
10851     // If we can use the 16-bit encoding and the user didn't explicitly
10852     // request the 32-bit variant, transform it here.
10853     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10854         (Inst.getOperand(1).isImm() &&
10855          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10856         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10857         !HasWideQualifier) {
10858       // The operands aren't in the same order for tMOVi8...
10859       MCInst TmpInst;
10860       TmpInst.setOpcode(ARM::tMOVi8);
10861       TmpInst.addOperand(Inst.getOperand(0));
10862       TmpInst.addOperand(Inst.getOperand(4));
10863       TmpInst.addOperand(Inst.getOperand(1));
10864       TmpInst.addOperand(Inst.getOperand(2));
10865       TmpInst.addOperand(Inst.getOperand(3));
10866       Inst = TmpInst;
10867       return true;
10868     }
10869     break;
10870 
10871   case ARM::t2MOVr:
10872     // If we can use the 16-bit encoding and the user didn't explicitly
10873     // request the 32-bit variant, transform it here.
10874     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10875         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10876         Inst.getOperand(2).getImm() == ARMCC::AL &&
10877         Inst.getOperand(4).getReg() == ARM::CPSR &&
10878         !HasWideQualifier) {
10879       // The operands aren't the same for tMOV[S]r... (no cc_out)
10880       MCInst TmpInst;
10881       unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10882       TmpInst.setOpcode(Op);
10883       TmpInst.addOperand(Inst.getOperand(0));
10884       TmpInst.addOperand(Inst.getOperand(1));
10885       if (Op == ARM::tMOVr) {
10886         TmpInst.addOperand(Inst.getOperand(2));
10887         TmpInst.addOperand(Inst.getOperand(3));
10888       }
10889       Inst = TmpInst;
10890       return true;
10891     }
10892     break;
10893 
10894   case ARM::t2SXTH:
10895   case ARM::t2SXTB:
10896   case ARM::t2UXTH:
10897   case ARM::t2UXTB:
10898     // If we can use the 16-bit encoding and the user didn't explicitly
10899     // request the 32-bit variant, transform it here.
10900     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10901         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10902         Inst.getOperand(2).getImm() == 0 &&
10903         !HasWideQualifier) {
10904       unsigned NewOpc;
10905       switch (Inst.getOpcode()) {
10906       default: llvm_unreachable("Illegal opcode!");
10907       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10908       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10909       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10910       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10911       }
10912       // The operands aren't the same for thumb1 (no rotate operand).
10913       MCInst TmpInst;
10914       TmpInst.setOpcode(NewOpc);
10915       TmpInst.addOperand(Inst.getOperand(0));
10916       TmpInst.addOperand(Inst.getOperand(1));
10917       TmpInst.addOperand(Inst.getOperand(3));
10918       TmpInst.addOperand(Inst.getOperand(4));
10919       Inst = TmpInst;
10920       return true;
10921     }
10922     break;
10923 
10924   case ARM::MOVsi: {
10925     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10926     // rrx shifts and asr/lsr of #32 is encoded as 0
10927     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10928       return false;
10929     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10930       // Shifting by zero is accepted as a vanilla 'MOVr'
10931       MCInst TmpInst;
10932       TmpInst.setOpcode(ARM::MOVr);
10933       TmpInst.addOperand(Inst.getOperand(0));
10934       TmpInst.addOperand(Inst.getOperand(1));
10935       TmpInst.addOperand(Inst.getOperand(3));
10936       TmpInst.addOperand(Inst.getOperand(4));
10937       TmpInst.addOperand(Inst.getOperand(5));
10938       Inst = TmpInst;
10939       return true;
10940     }
10941     return false;
10942   }
10943   case ARM::ANDrsi:
10944   case ARM::ORRrsi:
10945   case ARM::EORrsi:
10946   case ARM::BICrsi:
10947   case ARM::SUBrsi:
10948   case ARM::ADDrsi: {
10949     unsigned newOpc;
10950     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
10951     if (SOpc == ARM_AM::rrx) return false;
10952     switch (Inst.getOpcode()) {
10953     default: llvm_unreachable("unexpected opcode!");
10954     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
10955     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
10956     case ARM::EORrsi: newOpc = ARM::EORrr; break;
10957     case ARM::BICrsi: newOpc = ARM::BICrr; break;
10958     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
10959     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
10960     }
10961     // If the shift is by zero, use the non-shifted instruction definition.
10962     // The exception is for right shifts, where 0 == 32
10963     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
10964         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
10965       MCInst TmpInst;
10966       TmpInst.setOpcode(newOpc);
10967       TmpInst.addOperand(Inst.getOperand(0));
10968       TmpInst.addOperand(Inst.getOperand(1));
10969       TmpInst.addOperand(Inst.getOperand(2));
10970       TmpInst.addOperand(Inst.getOperand(4));
10971       TmpInst.addOperand(Inst.getOperand(5));
10972       TmpInst.addOperand(Inst.getOperand(6));
10973       Inst = TmpInst;
10974       return true;
10975     }
10976     return false;
10977   }
10978   case ARM::ITasm:
10979   case ARM::t2IT: {
10980     // Set up the IT block state according to the IT instruction we just
10981     // matched.
10982     assert(!inITBlock() && "nested IT blocks?!");
10983     startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
10984                          Inst.getOperand(1).getImm());
10985     break;
10986   }
10987   case ARM::t2LSLrr:
10988   case ARM::t2LSRrr:
10989   case ARM::t2ASRrr:
10990   case ARM::t2SBCrr:
10991   case ARM::t2RORrr:
10992   case ARM::t2BICrr:
10993     // Assemblers should use the narrow encodings of these instructions when permissible.
10994     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10995          isARMLowRegister(Inst.getOperand(2).getReg())) &&
10996         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10997         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10998         !HasWideQualifier) {
10999       unsigned NewOpc;
11000       switch (Inst.getOpcode()) {
11001         default: llvm_unreachable("unexpected opcode");
11002         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
11003         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
11004         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
11005         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
11006         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
11007         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
11008       }
11009       MCInst TmpInst;
11010       TmpInst.setOpcode(NewOpc);
11011       TmpInst.addOperand(Inst.getOperand(0));
11012       TmpInst.addOperand(Inst.getOperand(5));
11013       TmpInst.addOperand(Inst.getOperand(1));
11014       TmpInst.addOperand(Inst.getOperand(2));
11015       TmpInst.addOperand(Inst.getOperand(3));
11016       TmpInst.addOperand(Inst.getOperand(4));
11017       Inst = TmpInst;
11018       return true;
11019     }
11020     return false;
11021 
11022   case ARM::t2ANDrr:
11023   case ARM::t2EORrr:
11024   case ARM::t2ADCrr:
11025   case ARM::t2ORRrr:
11026     // Assemblers should use the narrow encodings of these instructions when permissible.
11027     // These instructions are special in that they are commutable, so shorter encodings
11028     // are available more often.
11029     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
11030          isARMLowRegister(Inst.getOperand(2).getReg())) &&
11031         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
11032          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
11033         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
11034         !HasWideQualifier) {
11035       unsigned NewOpc;
11036       switch (Inst.getOpcode()) {
11037         default: llvm_unreachable("unexpected opcode");
11038         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
11039         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
11040         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
11041         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
11042       }
11043       MCInst TmpInst;
11044       TmpInst.setOpcode(NewOpc);
11045       TmpInst.addOperand(Inst.getOperand(0));
11046       TmpInst.addOperand(Inst.getOperand(5));
11047       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
11048         TmpInst.addOperand(Inst.getOperand(1));
11049         TmpInst.addOperand(Inst.getOperand(2));
11050       } else {
11051         TmpInst.addOperand(Inst.getOperand(2));
11052         TmpInst.addOperand(Inst.getOperand(1));
11053       }
11054       TmpInst.addOperand(Inst.getOperand(3));
11055       TmpInst.addOperand(Inst.getOperand(4));
11056       Inst = TmpInst;
11057       return true;
11058     }
11059     return false;
11060   case ARM::MVE_VPST:
11061   case ARM::MVE_VPTv16i8:
11062   case ARM::MVE_VPTv8i16:
11063   case ARM::MVE_VPTv4i32:
11064   case ARM::MVE_VPTv16u8:
11065   case ARM::MVE_VPTv8u16:
11066   case ARM::MVE_VPTv4u32:
11067   case ARM::MVE_VPTv16s8:
11068   case ARM::MVE_VPTv8s16:
11069   case ARM::MVE_VPTv4s32:
11070   case ARM::MVE_VPTv4f32:
11071   case ARM::MVE_VPTv8f16:
11072   case ARM::MVE_VPTv16i8r:
11073   case ARM::MVE_VPTv8i16r:
11074   case ARM::MVE_VPTv4i32r:
11075   case ARM::MVE_VPTv16u8r:
11076   case ARM::MVE_VPTv8u16r:
11077   case ARM::MVE_VPTv4u32r:
11078   case ARM::MVE_VPTv16s8r:
11079   case ARM::MVE_VPTv8s16r:
11080   case ARM::MVE_VPTv4s32r:
11081   case ARM::MVE_VPTv4f32r:
11082   case ARM::MVE_VPTv8f16r: {
11083     assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
11084     MCOperand &MO = Inst.getOperand(0);
11085     VPTState.Mask = MO.getImm();
11086     VPTState.CurPosition = 0;
11087     break;
11088   }
11089   }
11090   return false;
11091 }
11092 
11093 unsigned
checkEarlyTargetMatchPredicate(MCInst & Inst,const OperandVector & Operands)11094 ARMAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
11095                                              const OperandVector &Operands) {
11096   unsigned Opc = Inst.getOpcode();
11097   switch (Opc) {
11098   // Prevent the mov r8 r8 encoding for nop being selected when the v6/thumb 2
11099   // encoding is available.
11100   case ARM::tMOVr: {
11101     if (Operands[0]->isToken() &&
11102         static_cast<ARMOperand &>(*Operands[0]).getToken() == "nop" &&
11103         ((isThumb() && !isThumbOne()) || hasV6MOps())) {
11104       return Match_MnemonicFail;
11105     }
11106   }
11107     [[fallthrough]];
11108   default:
11109     return Match_Success;
11110   }
11111 }
11112 
checkTargetMatchPredicate(MCInst & Inst)11113 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
11114   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
11115   // suffix depending on whether they're in an IT block or not.
11116   unsigned Opc = Inst.getOpcode();
11117   const MCInstrDesc &MCID = MII.get(Opc);
11118   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
11119     assert(MCID.hasOptionalDef() &&
11120            "optionally flag setting instruction missing optional def operand");
11121     assert(MCID.NumOperands == Inst.getNumOperands() &&
11122            "operand count mismatch!");
11123     bool IsCPSR = false;
11124     // Check if the instruction has CPSR set.
11125     for (unsigned OpNo = 0; OpNo < MCID.NumOperands; ++OpNo) {
11126       if (MCID.operands()[OpNo].isOptionalDef() &&
11127           Inst.getOperand(OpNo).isReg() &&
11128           Inst.getOperand(OpNo).getReg() == ARM::CPSR)
11129         IsCPSR = true;
11130     }
11131 
11132     // If we're parsing Thumb1, reject it completely.
11133     if (isThumbOne() && !IsCPSR)
11134       return Match_RequiresFlagSetting;
11135     // If we're parsing Thumb2, which form is legal depends on whether we're
11136     // in an IT block.
11137     if (isThumbTwo() && !IsCPSR && !inITBlock())
11138       return Match_RequiresITBlock;
11139     if (isThumbTwo() && IsCPSR && inITBlock())
11140       return Match_RequiresNotITBlock;
11141     // LSL with zero immediate is not allowed in an IT block
11142     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
11143       return Match_RequiresNotITBlock;
11144   } else if (isThumbOne()) {
11145     // Some high-register supporting Thumb1 encodings only allow both registers
11146     // to be from r0-r7 when in Thumb2.
11147     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
11148         isARMLowRegister(Inst.getOperand(1).getReg()) &&
11149         isARMLowRegister(Inst.getOperand(2).getReg()))
11150       return Match_RequiresThumb2;
11151     // Others only require ARMv6 or later.
11152     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
11153              isARMLowRegister(Inst.getOperand(0).getReg()) &&
11154              isARMLowRegister(Inst.getOperand(1).getReg()))
11155       return Match_RequiresV6;
11156   }
11157 
11158   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
11159   // than the loop below can handle, so it uses the GPRnopc register class and
11160   // we do SP handling here.
11161   if (Opc == ARM::t2MOVr && !hasV8Ops())
11162   {
11163     // SP as both source and destination is not allowed
11164     if (Inst.getOperand(0).getReg() == ARM::SP &&
11165         Inst.getOperand(1).getReg() == ARM::SP)
11166       return Match_RequiresV8;
11167     // When flags-setting SP as either source or destination is not allowed
11168     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
11169         (Inst.getOperand(0).getReg() == ARM::SP ||
11170          Inst.getOperand(1).getReg() == ARM::SP))
11171       return Match_RequiresV8;
11172   }
11173 
11174   switch (Inst.getOpcode()) {
11175   case ARM::VMRS:
11176   case ARM::VMSR:
11177   case ARM::VMRS_FPCXTS:
11178   case ARM::VMRS_FPCXTNS:
11179   case ARM::VMSR_FPCXTS:
11180   case ARM::VMSR_FPCXTNS:
11181   case ARM::VMRS_FPSCR_NZCVQC:
11182   case ARM::VMSR_FPSCR_NZCVQC:
11183   case ARM::FMSTAT:
11184   case ARM::VMRS_VPR:
11185   case ARM::VMRS_P0:
11186   case ARM::VMSR_VPR:
11187   case ARM::VMSR_P0:
11188     // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
11189     // ARMv8-A.
11190     if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
11191         (isThumb() && !hasV8Ops()))
11192       return Match_InvalidOperand;
11193     break;
11194   case ARM::t2TBB:
11195   case ARM::t2TBH:
11196     // Rn = sp is only allowed with ARMv8-A
11197     if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
11198       return Match_RequiresV8;
11199     break;
11200   case ARM::tMUL:
11201     // The second source operand must be the same register as the destination
11202     // operand.
11203     // FIXME: Ideally this would be handled by ARMGenAsmMatcher and
11204     // emitAsmTiedOperandConstraints.
11205     if (Inst.getOperand(0).getReg() != Inst.getOperand(3).getReg())
11206       return Match_InvalidTiedOperand;
11207     break;
11208   default:
11209     break;
11210   }
11211 
11212   for (unsigned I = 0; I < MCID.NumOperands; ++I)
11213     if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
11214       // rGPRRegClass excludes PC, and also excluded SP before ARMv8
11215       const auto &Op = Inst.getOperand(I);
11216       if (!Op.isReg()) {
11217         // This can happen in awkward cases with tied operands, e.g. a
11218         // writeback load/store with a complex addressing mode in
11219         // which there's an output operand corresponding to the
11220         // updated written-back base register: the Tablegen-generated
11221         // AsmMatcher will have written a placeholder operand to that
11222         // slot in the form of an immediate 0, because it can't
11223         // generate the register part of the complex addressing-mode
11224         // operand ahead of time.
11225         continue;
11226       }
11227 
11228       unsigned Reg = Op.getReg();
11229       if ((Reg == ARM::SP) && !hasV8Ops())
11230         return Match_RequiresV8;
11231       else if (Reg == ARM::PC)
11232         return Match_InvalidOperand;
11233     }
11234 
11235   return Match_Success;
11236 }
11237 
11238 namespace llvm {
11239 
IsCPSRDead(const MCInst * Instr)11240 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
11241   return true; // In an assembly source, no need to second-guess
11242 }
11243 
11244 } // end namespace llvm
11245 
11246 // Returns true if Inst is unpredictable if it is in and IT block, but is not
11247 // the last instruction in the block.
isITBlockTerminator(MCInst & Inst) const11248 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
11249   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11250 
11251   // All branch & call instructions terminate IT blocks with the exception of
11252   // SVC.
11253   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
11254       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
11255     return true;
11256 
11257   // Any arithmetic instruction which writes to the PC also terminates the IT
11258   // block.
11259   if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
11260     return true;
11261 
11262   return false;
11263 }
11264 
MatchInstruction(OperandVector & Operands,MCInst & Inst,SmallVectorImpl<NearMissInfo> & NearMisses,bool MatchingInlineAsm,bool & EmitInITBlock,MCStreamer & Out)11265 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
11266                                           SmallVectorImpl<NearMissInfo> &NearMisses,
11267                                           bool MatchingInlineAsm,
11268                                           bool &EmitInITBlock,
11269                                           MCStreamer &Out) {
11270   // If we can't use an implicit IT block here, just match as normal.
11271   if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11272     return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11273 
11274   // Try to match the instruction in an extension of the current IT block (if
11275   // there is one).
11276   if (inImplicitITBlock()) {
11277     extendImplicitITBlock(ITState.Cond);
11278     if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11279             Match_Success) {
11280       // The match succeded, but we still have to check that the instruction is
11281       // valid in this implicit IT block.
11282       const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11283       if (MCID.isPredicable()) {
11284         ARMCC::CondCodes InstCond =
11285             (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11286                 .getImm();
11287         ARMCC::CondCodes ITCond = currentITCond();
11288         if (InstCond == ITCond) {
11289           EmitInITBlock = true;
11290           return Match_Success;
11291         } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
11292           invertCurrentITCondition();
11293           EmitInITBlock = true;
11294           return Match_Success;
11295         }
11296       }
11297     }
11298     rewindImplicitITPosition();
11299   }
11300 
11301   // Finish the current IT block, and try to match outside any IT block.
11302   flushPendingInstructions(Out);
11303   unsigned PlainMatchResult =
11304       MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11305   if (PlainMatchResult == Match_Success) {
11306     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11307     if (MCID.isPredicable()) {
11308       ARMCC::CondCodes InstCond =
11309           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11310               .getImm();
11311       // Some forms of the branch instruction have their own condition code
11312       // fields, so can be conditionally executed without an IT block.
11313       if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
11314         EmitInITBlock = false;
11315         return Match_Success;
11316       }
11317       if (InstCond == ARMCC::AL) {
11318         EmitInITBlock = false;
11319         return Match_Success;
11320       }
11321     } else {
11322       EmitInITBlock = false;
11323       return Match_Success;
11324     }
11325   }
11326 
11327   // Try to match in a new IT block. The matcher doesn't check the actual
11328   // condition, so we create an IT block with a dummy condition, and fix it up
11329   // once we know the actual condition.
11330   startImplicitITBlock();
11331   if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11332       Match_Success) {
11333     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11334     if (MCID.isPredicable()) {
11335       ITState.Cond =
11336           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11337               .getImm();
11338       EmitInITBlock = true;
11339       return Match_Success;
11340     }
11341   }
11342   discardImplicitITBlock();
11343 
11344   // If none of these succeed, return the error we got when trying to match
11345   // outside any IT blocks.
11346   EmitInITBlock = false;
11347   return PlainMatchResult;
11348 }
11349 
11350 static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11351                                          unsigned VariantID = 0);
11352 
11353 static const char *getSubtargetFeatureName(uint64_t Val);
MatchAndEmitInstruction(SMLoc IDLoc,unsigned & Opcode,OperandVector & Operands,MCStreamer & Out,uint64_t & ErrorInfo,bool MatchingInlineAsm)11354 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
11355                                            OperandVector &Operands,
11356                                            MCStreamer &Out, uint64_t &ErrorInfo,
11357                                            bool MatchingInlineAsm) {
11358   MCInst Inst;
11359   unsigned MatchResult;
11360   bool PendConditionalInstruction = false;
11361 
11362   SmallVector<NearMissInfo, 4> NearMisses;
11363   MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
11364                                  PendConditionalInstruction, Out);
11365 
11366   // Find the number of operators that are part of the Mnumonic (LHS).
11367   unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
11368 
11369   switch (MatchResult) {
11370   case Match_Success:
11371     LLVM_DEBUG(dbgs() << "Parsed as: ";
11372                Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11373                dbgs() << "\n");
11374 
11375     // Context sensitive operand constraints aren't handled by the matcher,
11376     // so check them here.
11377     if (validateInstruction(Inst, Operands, MnemonicOpsEndInd)) {
11378       // Still progress the IT block, otherwise one wrong condition causes
11379       // nasty cascading errors.
11380       forwardITPosition();
11381       forwardVPTPosition();
11382       return true;
11383     }
11384 
11385     {
11386       // Some instructions need post-processing to, for example, tweak which
11387       // encoding is selected. Loop on it while changes happen so the
11388       // individual transformations can chain off each other. E.g.,
11389       // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
11390       while (processInstruction(Inst, Operands, MnemonicOpsEndInd, Out))
11391         LLVM_DEBUG(dbgs() << "Changed to: ";
11392                    Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11393                    dbgs() << "\n");
11394     }
11395 
11396     // Only move forward at the very end so that everything in validate
11397     // and process gets a consistent answer about whether we're in an IT
11398     // block.
11399     forwardITPosition();
11400     forwardVPTPosition();
11401 
11402     // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
11403     // doesn't actually encode.
11404     if (Inst.getOpcode() == ARM::ITasm)
11405       return false;
11406 
11407     Inst.setLoc(IDLoc);
11408     if (PendConditionalInstruction) {
11409       PendingConditionalInsts.push_back(Inst);
11410       if (isITBlockFull() || isITBlockTerminator(Inst))
11411         flushPendingInstructions(Out);
11412     } else {
11413       Out.emitInstruction(Inst, getSTI());
11414     }
11415     return false;
11416   case Match_NearMisses:
11417     ReportNearMisses(NearMisses, IDLoc, Operands);
11418     return true;
11419   case Match_MnemonicFail: {
11420     FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11421     std::string Suggestion = ARMMnemonicSpellCheck(
11422       ((ARMOperand &)*Operands[0]).getToken(), FBS);
11423     return Error(IDLoc, "invalid instruction" + Suggestion,
11424                  ((ARMOperand &)*Operands[0]).getLocRange());
11425   }
11426   }
11427 
11428   llvm_unreachable("Implement any new match types added!");
11429 }
11430 
11431 /// parseDirective parses the arm specific directives
ParseDirective(AsmToken DirectiveID)11432 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
11433   const MCContext::Environment Format = getContext().getObjectFileType();
11434   bool IsMachO = Format == MCContext::IsMachO;
11435   bool IsCOFF = Format == MCContext::IsCOFF;
11436 
11437   std::string IDVal = DirectiveID.getIdentifier().lower();
11438   if (IDVal == ".word")
11439     parseLiteralValues(4, DirectiveID.getLoc());
11440   else if (IDVal == ".short" || IDVal == ".hword")
11441     parseLiteralValues(2, DirectiveID.getLoc());
11442   else if (IDVal == ".thumb")
11443     parseDirectiveThumb(DirectiveID.getLoc());
11444   else if (IDVal == ".arm")
11445     parseDirectiveARM(DirectiveID.getLoc());
11446   else if (IDVal == ".thumb_func")
11447     parseDirectiveThumbFunc(DirectiveID.getLoc());
11448   else if (IDVal == ".code")
11449     parseDirectiveCode(DirectiveID.getLoc());
11450   else if (IDVal == ".syntax")
11451     parseDirectiveSyntax(DirectiveID.getLoc());
11452   else if (IDVal == ".unreq")
11453     parseDirectiveUnreq(DirectiveID.getLoc());
11454   else if (IDVal == ".fnend")
11455     parseDirectiveFnEnd(DirectiveID.getLoc());
11456   else if (IDVal == ".cantunwind")
11457     parseDirectiveCantUnwind(DirectiveID.getLoc());
11458   else if (IDVal == ".personality")
11459     parseDirectivePersonality(DirectiveID.getLoc());
11460   else if (IDVal == ".handlerdata")
11461     parseDirectiveHandlerData(DirectiveID.getLoc());
11462   else if (IDVal == ".setfp")
11463     parseDirectiveSetFP(DirectiveID.getLoc());
11464   else if (IDVal == ".pad")
11465     parseDirectivePad(DirectiveID.getLoc());
11466   else if (IDVal == ".save")
11467     parseDirectiveRegSave(DirectiveID.getLoc(), false);
11468   else if (IDVal == ".vsave")
11469     parseDirectiveRegSave(DirectiveID.getLoc(), true);
11470   else if (IDVal == ".ltorg" || IDVal == ".pool")
11471     parseDirectiveLtorg(DirectiveID.getLoc());
11472   else if (IDVal == ".even")
11473     parseDirectiveEven(DirectiveID.getLoc());
11474   else if (IDVal == ".personalityindex")
11475     parseDirectivePersonalityIndex(DirectiveID.getLoc());
11476   else if (IDVal == ".unwind_raw")
11477     parseDirectiveUnwindRaw(DirectiveID.getLoc());
11478   else if (IDVal == ".movsp")
11479     parseDirectiveMovSP(DirectiveID.getLoc());
11480   else if (IDVal == ".arch_extension")
11481     parseDirectiveArchExtension(DirectiveID.getLoc());
11482   else if (IDVal == ".align")
11483     return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
11484   else if (IDVal == ".thumb_set")
11485     parseDirectiveThumbSet(DirectiveID.getLoc());
11486   else if (IDVal == ".inst")
11487     parseDirectiveInst(DirectiveID.getLoc());
11488   else if (IDVal == ".inst.n")
11489     parseDirectiveInst(DirectiveID.getLoc(), 'n');
11490   else if (IDVal == ".inst.w")
11491     parseDirectiveInst(DirectiveID.getLoc(), 'w');
11492   else if (!IsMachO && !IsCOFF) {
11493     if (IDVal == ".arch")
11494       parseDirectiveArch(DirectiveID.getLoc());
11495     else if (IDVal == ".cpu")
11496       parseDirectiveCPU(DirectiveID.getLoc());
11497     else if (IDVal == ".eabi_attribute")
11498       parseDirectiveEabiAttr(DirectiveID.getLoc());
11499     else if (IDVal == ".fpu")
11500       parseDirectiveFPU(DirectiveID.getLoc());
11501     else if (IDVal == ".fnstart")
11502       parseDirectiveFnStart(DirectiveID.getLoc());
11503     else if (IDVal == ".object_arch")
11504       parseDirectiveObjectArch(DirectiveID.getLoc());
11505     else if (IDVal == ".tlsdescseq")
11506       parseDirectiveTLSDescSeq(DirectiveID.getLoc());
11507     else
11508       return true;
11509   } else if (IsCOFF) {
11510     if (IDVal == ".seh_stackalloc")
11511       parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/false);
11512     else if (IDVal == ".seh_stackalloc_w")
11513       parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/true);
11514     else if (IDVal == ".seh_save_regs")
11515       parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/false);
11516     else if (IDVal == ".seh_save_regs_w")
11517       parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/true);
11518     else if (IDVal == ".seh_save_sp")
11519       parseDirectiveSEHSaveSP(DirectiveID.getLoc());
11520     else if (IDVal == ".seh_save_fregs")
11521       parseDirectiveSEHSaveFRegs(DirectiveID.getLoc());
11522     else if (IDVal == ".seh_save_lr")
11523       parseDirectiveSEHSaveLR(DirectiveID.getLoc());
11524     else if (IDVal == ".seh_endprologue")
11525       parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/false);
11526     else if (IDVal == ".seh_endprologue_fragment")
11527       parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/true);
11528     else if (IDVal == ".seh_nop")
11529       parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/false);
11530     else if (IDVal == ".seh_nop_w")
11531       parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/true);
11532     else if (IDVal == ".seh_startepilogue")
11533       parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/false);
11534     else if (IDVal == ".seh_startepilogue_cond")
11535       parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/true);
11536     else if (IDVal == ".seh_endepilogue")
11537       parseDirectiveSEHEpilogEnd(DirectiveID.getLoc());
11538     else if (IDVal == ".seh_custom")
11539       parseDirectiveSEHCustom(DirectiveID.getLoc());
11540     else
11541       return true;
11542   } else
11543     return true;
11544   return false;
11545 }
11546 
11547 /// parseLiteralValues
11548 ///  ::= .hword expression [, expression]*
11549 ///  ::= .short expression [, expression]*
11550 ///  ::= .word expression [, expression]*
parseLiteralValues(unsigned Size,SMLoc L)11551 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11552   auto parseOne = [&]() -> bool {
11553     const MCExpr *Value;
11554     if (getParser().parseExpression(Value))
11555       return true;
11556     getParser().getStreamer().emitValue(Value, Size, L);
11557     return false;
11558   };
11559   return (parseMany(parseOne));
11560 }
11561 
11562 /// parseDirectiveThumb
11563 ///  ::= .thumb
parseDirectiveThumb(SMLoc L)11564 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11565   if (parseEOL() || check(!hasThumb(), L, "target does not support Thumb mode"))
11566     return true;
11567 
11568   if (!isThumb())
11569     SwitchMode();
11570 
11571   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11572   getParser().getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
11573   return false;
11574 }
11575 
11576 /// parseDirectiveARM
11577 ///  ::= .arm
parseDirectiveARM(SMLoc L)11578 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11579   if (parseEOL() || check(!hasARM(), L, "target does not support ARM mode"))
11580     return true;
11581 
11582   if (isThumb())
11583     SwitchMode();
11584   getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11585   getParser().getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
11586   return false;
11587 }
11588 
11589 MCSymbolRefExpr::VariantKind
getVariantKindForName(StringRef Name) const11590 ARMAsmParser::getVariantKindForName(StringRef Name) const {
11591   return StringSwitch<MCSymbolRefExpr::VariantKind>(Name.lower())
11592       .Case("funcdesc", MCSymbolRefExpr::VK_FUNCDESC)
11593       .Case("got", MCSymbolRefExpr::VK_GOT)
11594       .Case("got_prel", MCSymbolRefExpr::VK_ARM_GOT_PREL)
11595       .Case("gotfuncdesc", MCSymbolRefExpr::VK_GOTFUNCDESC)
11596       .Case("gotoff", MCSymbolRefExpr::VK_GOTOFF)
11597       .Case("gotofffuncdesc", MCSymbolRefExpr::VK_GOTOFFFUNCDESC)
11598       .Case("gottpoff", MCSymbolRefExpr::VK_GOTTPOFF)
11599       .Case("gottpoff_fdpic", MCSymbolRefExpr::VK_GOTTPOFF_FDPIC)
11600       .Case("imgrel", MCSymbolRefExpr::VK_COFF_IMGREL32)
11601       .Case("none", MCSymbolRefExpr::VK_ARM_NONE)
11602       .Case("plt", MCSymbolRefExpr::VK_PLT)
11603       .Case("prel31", MCSymbolRefExpr::VK_ARM_PREL31)
11604       .Case("sbrel", MCSymbolRefExpr::VK_ARM_SBREL)
11605       .Case("secrel32", MCSymbolRefExpr::VK_SECREL)
11606       .Case("target1", MCSymbolRefExpr::VK_ARM_TARGET1)
11607       .Case("target2", MCSymbolRefExpr::VK_ARM_TARGET2)
11608       .Case("tlscall", MCSymbolRefExpr::VK_TLSCALL)
11609       .Case("tlsdesc", MCSymbolRefExpr::VK_TLSDESC)
11610       .Case("tlsgd", MCSymbolRefExpr::VK_TLSGD)
11611       .Case("tlsgd_fdpic", MCSymbolRefExpr::VK_TLSGD_FDPIC)
11612       .Case("tlsld", MCSymbolRefExpr::VK_TLSLD)
11613       .Case("tlsldm", MCSymbolRefExpr::VK_TLSLDM)
11614       .Case("tlsldm_fdpic", MCSymbolRefExpr::VK_TLSLDM_FDPIC)
11615       .Case("tlsldo", MCSymbolRefExpr::VK_ARM_TLSLDO)
11616       .Case("tpoff", MCSymbolRefExpr::VK_TPOFF)
11617       .Default(MCSymbolRefExpr::VK_Invalid);
11618 }
11619 
doBeforeLabelEmit(MCSymbol * Symbol,SMLoc IDLoc)11620 void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
11621   // We need to flush the current implicit IT block on a label, because it is
11622   // not legal to branch into an IT block.
11623   flushPendingInstructions(getStreamer());
11624 }
11625 
onLabelParsed(MCSymbol * Symbol)11626 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11627   if (NextSymbolIsThumb) {
11628     getParser().getStreamer().emitThumbFunc(Symbol);
11629     NextSymbolIsThumb = false;
11630   }
11631 }
11632 
11633 /// parseDirectiveThumbFunc
11634 ///  ::= .thumbfunc symbol_name
parseDirectiveThumbFunc(SMLoc L)11635 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11636   MCAsmParser &Parser = getParser();
11637   const auto Format = getContext().getObjectFileType();
11638   bool IsMachO = Format == MCContext::IsMachO;
11639 
11640   // Darwin asm has (optionally) function name after .thumb_func direction
11641   // ELF doesn't
11642 
11643   if (IsMachO) {
11644     if (Parser.getTok().is(AsmToken::Identifier) ||
11645         Parser.getTok().is(AsmToken::String)) {
11646       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11647           Parser.getTok().getIdentifier());
11648       getParser().getStreamer().emitThumbFunc(Func);
11649       Parser.Lex();
11650       if (parseEOL())
11651         return true;
11652       return false;
11653     }
11654   }
11655 
11656   if (parseEOL())
11657     return true;
11658 
11659   // .thumb_func implies .thumb
11660   if (!isThumb())
11661     SwitchMode();
11662 
11663   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11664 
11665   NextSymbolIsThumb = true;
11666   return false;
11667 }
11668 
11669 /// parseDirectiveSyntax
11670 ///  ::= .syntax unified | divided
parseDirectiveSyntax(SMLoc L)11671 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11672   MCAsmParser &Parser = getParser();
11673   const AsmToken &Tok = Parser.getTok();
11674   if (Tok.isNot(AsmToken::Identifier)) {
11675     Error(L, "unexpected token in .syntax directive");
11676     return false;
11677   }
11678 
11679   StringRef Mode = Tok.getString();
11680   Parser.Lex();
11681   if (check(Mode == "divided" || Mode == "DIVIDED", L,
11682             "'.syntax divided' arm assembly not supported") ||
11683       check(Mode != "unified" && Mode != "UNIFIED", L,
11684             "unrecognized syntax mode in .syntax directive") ||
11685       parseEOL())
11686     return true;
11687 
11688   // TODO tell the MC streamer the mode
11689   // getParser().getStreamer().Emit???();
11690   return false;
11691 }
11692 
11693 /// parseDirectiveCode
11694 ///  ::= .code 16 | 32
parseDirectiveCode(SMLoc L)11695 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11696   MCAsmParser &Parser = getParser();
11697   const AsmToken &Tok = Parser.getTok();
11698   if (Tok.isNot(AsmToken::Integer))
11699     return Error(L, "unexpected token in .code directive");
11700   int64_t Val = Parser.getTok().getIntVal();
11701   if (Val != 16 && Val != 32) {
11702     Error(L, "invalid operand to .code directive");
11703     return false;
11704   }
11705   Parser.Lex();
11706 
11707   if (parseEOL())
11708     return true;
11709 
11710   if (Val == 16) {
11711     if (!hasThumb())
11712       return Error(L, "target does not support Thumb mode");
11713 
11714     if (!isThumb())
11715       SwitchMode();
11716     getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11717   } else {
11718     if (!hasARM())
11719       return Error(L, "target does not support ARM mode");
11720 
11721     if (isThumb())
11722       SwitchMode();
11723     getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11724   }
11725 
11726   return false;
11727 }
11728 
11729 /// parseDirectiveReq
11730 ///  ::= name .req registername
parseDirectiveReq(StringRef Name,SMLoc L)11731 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11732   MCAsmParser &Parser = getParser();
11733   Parser.Lex(); // Eat the '.req' token.
11734   MCRegister Reg;
11735   SMLoc SRegLoc, ERegLoc;
11736   if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11737             "register name expected") ||
11738       parseEOL())
11739     return true;
11740 
11741   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
11742     return Error(SRegLoc,
11743                  "redefinition of '" + Name + "' does not match original.");
11744 
11745   return false;
11746 }
11747 
11748 /// parseDirectiveUneq
11749 ///  ::= .unreq registername
parseDirectiveUnreq(SMLoc L)11750 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11751   MCAsmParser &Parser = getParser();
11752   if (Parser.getTok().isNot(AsmToken::Identifier))
11753     return Error(L, "unexpected input in .unreq directive.");
11754   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11755   Parser.Lex(); // Eat the identifier.
11756   return parseEOL();
11757 }
11758 
11759 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
11760 // before, if supported by the new target, or emit mapping symbols for the mode
11761 // switch.
FixModeAfterArchChange(bool WasThumb,SMLoc Loc)11762 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
11763   if (WasThumb != isThumb()) {
11764     if (WasThumb && hasThumb()) {
11765       // Stay in Thumb mode
11766       SwitchMode();
11767     } else if (!WasThumb && hasARM()) {
11768       // Stay in ARM mode
11769       SwitchMode();
11770     } else {
11771       // Mode switch forced, because the new arch doesn't support the old mode.
11772       getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
11773                                                             : MCAF_Code32);
11774       // Warn about the implcit mode switch. GAS does not switch modes here,
11775       // but instead stays in the old mode, reporting an error on any following
11776       // instructions as the mode does not exist on the target.
11777       Warning(Loc, Twine("new target does not support ") +
11778                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
11779                        (!WasThumb ? "thumb" : "arm") + " mode");
11780     }
11781   }
11782 }
11783 
11784 /// parseDirectiveArch
11785 ///  ::= .arch token
parseDirectiveArch(SMLoc L)11786 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11787   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11788   ARM::ArchKind ID = ARM::parseArch(Arch);
11789 
11790   if (ID == ARM::ArchKind::INVALID)
11791     return Error(L, "Unknown arch name");
11792 
11793   bool WasThumb = isThumb();
11794   Triple T;
11795   MCSubtargetInfo &STI = copySTI();
11796   STI.setDefaultFeatures("", /*TuneCPU*/ "",
11797                          ("+" + ARM::getArchName(ID)).str());
11798   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11799   FixModeAfterArchChange(WasThumb, L);
11800 
11801   getTargetStreamer().emitArch(ID);
11802   return false;
11803 }
11804 
11805 /// parseDirectiveEabiAttr
11806 ///  ::= .eabi_attribute int, int [, "str"]
11807 ///  ::= .eabi_attribute Tag_name, int [, "str"]
parseDirectiveEabiAttr(SMLoc L)11808 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
11809   MCAsmParser &Parser = getParser();
11810   int64_t Tag;
11811   SMLoc TagLoc;
11812   TagLoc = Parser.getTok().getLoc();
11813   if (Parser.getTok().is(AsmToken::Identifier)) {
11814     StringRef Name = Parser.getTok().getIdentifier();
11815     std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
11816         Name, ARMBuildAttrs::getARMAttributeTags());
11817     if (!Ret) {
11818       Error(TagLoc, "attribute name not recognised: " + Name);
11819       return false;
11820     }
11821     Tag = *Ret;
11822     Parser.Lex();
11823   } else {
11824     const MCExpr *AttrExpr;
11825 
11826     TagLoc = Parser.getTok().getLoc();
11827     if (Parser.parseExpression(AttrExpr))
11828       return true;
11829 
11830     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
11831     if (check(!CE, TagLoc, "expected numeric constant"))
11832       return true;
11833 
11834     Tag = CE->getValue();
11835   }
11836 
11837   if (Parser.parseComma())
11838     return true;
11839 
11840   StringRef StringValue = "";
11841   bool IsStringValue = false;
11842 
11843   int64_t IntegerValue = 0;
11844   bool IsIntegerValue = false;
11845 
11846   if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
11847     IsStringValue = true;
11848   else if (Tag == ARMBuildAttrs::compatibility) {
11849     IsStringValue = true;
11850     IsIntegerValue = true;
11851   } else if (Tag < 32 || Tag % 2 == 0)
11852     IsIntegerValue = true;
11853   else if (Tag % 2 == 1)
11854     IsStringValue = true;
11855   else
11856     llvm_unreachable("invalid tag type");
11857 
11858   if (IsIntegerValue) {
11859     const MCExpr *ValueExpr;
11860     SMLoc ValueExprLoc = Parser.getTok().getLoc();
11861     if (Parser.parseExpression(ValueExpr))
11862       return true;
11863 
11864     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
11865     if (!CE)
11866       return Error(ValueExprLoc, "expected numeric constant");
11867     IntegerValue = CE->getValue();
11868   }
11869 
11870   if (Tag == ARMBuildAttrs::compatibility) {
11871     if (Parser.parseComma())
11872       return true;
11873   }
11874 
11875   std::string EscapedValue;
11876   if (IsStringValue) {
11877     if (Parser.getTok().isNot(AsmToken::String))
11878       return Error(Parser.getTok().getLoc(), "bad string constant");
11879 
11880     if (Tag == ARMBuildAttrs::also_compatible_with) {
11881       if (Parser.parseEscapedString(EscapedValue))
11882         return Error(Parser.getTok().getLoc(), "bad escaped string constant");
11883 
11884       StringValue = EscapedValue;
11885     } else {
11886       StringValue = Parser.getTok().getStringContents();
11887       Parser.Lex();
11888     }
11889   }
11890 
11891   if (Parser.parseEOL())
11892     return true;
11893 
11894   if (IsIntegerValue && IsStringValue) {
11895     assert(Tag == ARMBuildAttrs::compatibility);
11896     getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
11897   } else if (IsIntegerValue)
11898     getTargetStreamer().emitAttribute(Tag, IntegerValue);
11899   else if (IsStringValue)
11900     getTargetStreamer().emitTextAttribute(Tag, StringValue);
11901   return false;
11902 }
11903 
11904 /// parseDirectiveCPU
11905 ///  ::= .cpu str
parseDirectiveCPU(SMLoc L)11906 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11907   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11908   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
11909 
11910   // FIXME: This is using table-gen data, but should be moved to
11911   // ARMTargetParser once that is table-gen'd.
11912   if (!getSTI().isCPUStringValid(CPU))
11913     return Error(L, "Unknown CPU name");
11914 
11915   bool WasThumb = isThumb();
11916   MCSubtargetInfo &STI = copySTI();
11917   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
11918   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11919   FixModeAfterArchChange(WasThumb, L);
11920 
11921   return false;
11922 }
11923 
11924 /// parseDirectiveFPU
11925 ///  ::= .fpu str
parseDirectiveFPU(SMLoc L)11926 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11927   SMLoc FPUNameLoc = getTok().getLoc();
11928   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11929 
11930   ARM::FPUKind ID = ARM::parseFPU(FPU);
11931   std::vector<StringRef> Features;
11932   if (!ARM::getFPUFeatures(ID, Features))
11933     return Error(FPUNameLoc, "Unknown FPU name");
11934 
11935   MCSubtargetInfo &STI = copySTI();
11936   for (auto Feature : Features)
11937     STI.ApplyFeatureFlag(Feature);
11938   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11939 
11940   getTargetStreamer().emitFPU(ID);
11941   return false;
11942 }
11943 
11944 /// parseDirectiveFnStart
11945 ///  ::= .fnstart
parseDirectiveFnStart(SMLoc L)11946 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11947   if (parseEOL())
11948     return true;
11949 
11950   if (UC.hasFnStart()) {
11951     Error(L, ".fnstart starts before the end of previous one");
11952     UC.emitFnStartLocNotes();
11953     return true;
11954   }
11955 
11956   // Reset the unwind directives parser state
11957   UC.reset();
11958 
11959   getTargetStreamer().emitFnStart();
11960 
11961   UC.recordFnStart(L);
11962   return false;
11963 }
11964 
11965 /// parseDirectiveFnEnd
11966 ///  ::= .fnend
parseDirectiveFnEnd(SMLoc L)11967 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
11968   if (parseEOL())
11969     return true;
11970   // Check the ordering of unwind directives
11971   if (!UC.hasFnStart())
11972     return Error(L, ".fnstart must precede .fnend directive");
11973 
11974   // Reset the unwind directives parser state
11975   getTargetStreamer().emitFnEnd();
11976 
11977   UC.reset();
11978   return false;
11979 }
11980 
11981 /// parseDirectiveCantUnwind
11982 ///  ::= .cantunwind
parseDirectiveCantUnwind(SMLoc L)11983 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
11984   if (parseEOL())
11985     return true;
11986 
11987   UC.recordCantUnwind(L);
11988   // Check the ordering of unwind directives
11989   if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
11990     return true;
11991 
11992   if (UC.hasHandlerData()) {
11993     Error(L, ".cantunwind can't be used with .handlerdata directive");
11994     UC.emitHandlerDataLocNotes();
11995     return true;
11996   }
11997   if (UC.hasPersonality()) {
11998     Error(L, ".cantunwind can't be used with .personality directive");
11999     UC.emitPersonalityLocNotes();
12000     return true;
12001   }
12002 
12003   getTargetStreamer().emitCantUnwind();
12004   return false;
12005 }
12006 
12007 /// parseDirectivePersonality
12008 ///  ::= .personality name
parseDirectivePersonality(SMLoc L)12009 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
12010   MCAsmParser &Parser = getParser();
12011   bool HasExistingPersonality = UC.hasPersonality();
12012 
12013   // Parse the name of the personality routine
12014   if (Parser.getTok().isNot(AsmToken::Identifier))
12015     return Error(L, "unexpected input in .personality directive.");
12016   StringRef Name(Parser.getTok().getIdentifier());
12017   Parser.Lex();
12018 
12019   if (parseEOL())
12020     return true;
12021 
12022   UC.recordPersonality(L);
12023 
12024   // Check the ordering of unwind directives
12025   if (!UC.hasFnStart())
12026     return Error(L, ".fnstart must precede .personality directive");
12027   if (UC.cantUnwind()) {
12028     Error(L, ".personality can't be used with .cantunwind directive");
12029     UC.emitCantUnwindLocNotes();
12030     return true;
12031   }
12032   if (UC.hasHandlerData()) {
12033     Error(L, ".personality must precede .handlerdata directive");
12034     UC.emitHandlerDataLocNotes();
12035     return true;
12036   }
12037   if (HasExistingPersonality) {
12038     Error(L, "multiple personality directives");
12039     UC.emitPersonalityLocNotes();
12040     return true;
12041   }
12042 
12043   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
12044   getTargetStreamer().emitPersonality(PR);
12045   return false;
12046 }
12047 
12048 /// parseDirectiveHandlerData
12049 ///  ::= .handlerdata
parseDirectiveHandlerData(SMLoc L)12050 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
12051   if (parseEOL())
12052     return true;
12053 
12054   UC.recordHandlerData(L);
12055   // Check the ordering of unwind directives
12056   if (!UC.hasFnStart())
12057     return Error(L, ".fnstart must precede .personality directive");
12058   if (UC.cantUnwind()) {
12059     Error(L, ".handlerdata can't be used with .cantunwind directive");
12060     UC.emitCantUnwindLocNotes();
12061     return true;
12062   }
12063 
12064   getTargetStreamer().emitHandlerData();
12065   return false;
12066 }
12067 
12068 /// parseDirectiveSetFP
12069 ///  ::= .setfp fpreg, spreg [, offset]
parseDirectiveSetFP(SMLoc L)12070 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
12071   MCAsmParser &Parser = getParser();
12072   // Check the ordering of unwind directives
12073   if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
12074       check(UC.hasHandlerData(), L,
12075             ".setfp must precede .handlerdata directive"))
12076     return true;
12077 
12078   // Parse fpreg
12079   SMLoc FPRegLoc = Parser.getTok().getLoc();
12080   int FPReg = tryParseRegister();
12081 
12082   if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
12083       Parser.parseComma())
12084     return true;
12085 
12086   // Parse spreg
12087   SMLoc SPRegLoc = Parser.getTok().getLoc();
12088   int SPReg = tryParseRegister();
12089   if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
12090       check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
12091             "register should be either $sp or the latest fp register"))
12092     return true;
12093 
12094   // Update the frame pointer register
12095   UC.saveFPReg(FPReg);
12096 
12097   // Parse offset
12098   int64_t Offset = 0;
12099   if (Parser.parseOptionalToken(AsmToken::Comma)) {
12100     if (Parser.getTok().isNot(AsmToken::Hash) &&
12101         Parser.getTok().isNot(AsmToken::Dollar))
12102       return Error(Parser.getTok().getLoc(), "'#' expected");
12103     Parser.Lex(); // skip hash token.
12104 
12105     const MCExpr *OffsetExpr;
12106     SMLoc ExLoc = Parser.getTok().getLoc();
12107     SMLoc EndLoc;
12108     if (getParser().parseExpression(OffsetExpr, EndLoc))
12109       return Error(ExLoc, "malformed setfp offset");
12110     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12111     if (check(!CE, ExLoc, "setfp offset must be an immediate"))
12112       return true;
12113     Offset = CE->getValue();
12114   }
12115 
12116   if (Parser.parseEOL())
12117     return true;
12118 
12119   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
12120                                 static_cast<unsigned>(SPReg), Offset);
12121   return false;
12122 }
12123 
12124 /// parseDirective
12125 ///  ::= .pad offset
parseDirectivePad(SMLoc L)12126 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
12127   MCAsmParser &Parser = getParser();
12128   // Check the ordering of unwind directives
12129   if (!UC.hasFnStart())
12130     return Error(L, ".fnstart must precede .pad directive");
12131   if (UC.hasHandlerData())
12132     return Error(L, ".pad must precede .handlerdata directive");
12133 
12134   // Parse the offset
12135   if (Parser.getTok().isNot(AsmToken::Hash) &&
12136       Parser.getTok().isNot(AsmToken::Dollar))
12137     return Error(Parser.getTok().getLoc(), "'#' expected");
12138   Parser.Lex(); // skip hash token.
12139 
12140   const MCExpr *OffsetExpr;
12141   SMLoc ExLoc = Parser.getTok().getLoc();
12142   SMLoc EndLoc;
12143   if (getParser().parseExpression(OffsetExpr, EndLoc))
12144     return Error(ExLoc, "malformed pad offset");
12145   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12146   if (!CE)
12147     return Error(ExLoc, "pad offset must be an immediate");
12148 
12149   if (parseEOL())
12150     return true;
12151 
12152   getTargetStreamer().emitPad(CE->getValue());
12153   return false;
12154 }
12155 
12156 /// parseDirectiveRegSave
12157 ///  ::= .save  { registers }
12158 ///  ::= .vsave { registers }
parseDirectiveRegSave(SMLoc L,bool IsVector)12159 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
12160   // Check the ordering of unwind directives
12161   if (!UC.hasFnStart())
12162     return Error(L, ".fnstart must precede .save or .vsave directives");
12163   if (UC.hasHandlerData())
12164     return Error(L, ".save or .vsave must precede .handlerdata directive");
12165 
12166   // RAII object to make sure parsed operands are deleted.
12167   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12168 
12169   // Parse the register list
12170   if (parseRegisterList(Operands, true, true) || parseEOL())
12171     return true;
12172   ARMOperand &Op = (ARMOperand &)*Operands[0];
12173   if (!IsVector && !Op.isRegList())
12174     return Error(L, ".save expects GPR registers");
12175   if (IsVector && !Op.isDPRRegList())
12176     return Error(L, ".vsave expects DPR registers");
12177 
12178   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
12179   return false;
12180 }
12181 
12182 /// parseDirectiveInst
12183 ///  ::= .inst opcode [, ...]
12184 ///  ::= .inst.n opcode [, ...]
12185 ///  ::= .inst.w opcode [, ...]
parseDirectiveInst(SMLoc Loc,char Suffix)12186 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
12187   int Width = 4;
12188 
12189   if (isThumb()) {
12190     switch (Suffix) {
12191     case 'n':
12192       Width = 2;
12193       break;
12194     case 'w':
12195       break;
12196     default:
12197       Width = 0;
12198       break;
12199     }
12200   } else {
12201     if (Suffix)
12202       return Error(Loc, "width suffixes are invalid in ARM mode");
12203   }
12204 
12205   auto parseOne = [&]() -> bool {
12206     const MCExpr *Expr;
12207     if (getParser().parseExpression(Expr))
12208       return true;
12209     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
12210     if (!Value) {
12211       return Error(Loc, "expected constant expression");
12212     }
12213 
12214     char CurSuffix = Suffix;
12215     switch (Width) {
12216     case 2:
12217       if (Value->getValue() > 0xffff)
12218         return Error(Loc, "inst.n operand is too big, use inst.w instead");
12219       break;
12220     case 4:
12221       if (Value->getValue() > 0xffffffff)
12222         return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
12223                               " operand is too big");
12224       break;
12225     case 0:
12226       // Thumb mode, no width indicated. Guess from the opcode, if possible.
12227       if (Value->getValue() < 0xe800)
12228         CurSuffix = 'n';
12229       else if (Value->getValue() >= 0xe8000000)
12230         CurSuffix = 'w';
12231       else
12232         return Error(Loc, "cannot determine Thumb instruction size, "
12233                           "use inst.n/inst.w instead");
12234       break;
12235     default:
12236       llvm_unreachable("only supported widths are 2 and 4");
12237     }
12238 
12239     getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
12240     forwardITPosition();
12241     forwardVPTPosition();
12242     return false;
12243   };
12244 
12245   if (parseOptionalToken(AsmToken::EndOfStatement))
12246     return Error(Loc, "expected expression following directive");
12247   if (parseMany(parseOne))
12248     return true;
12249   return false;
12250 }
12251 
12252 /// parseDirectiveLtorg
12253 ///  ::= .ltorg | .pool
parseDirectiveLtorg(SMLoc L)12254 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
12255   if (parseEOL())
12256     return true;
12257   getTargetStreamer().emitCurrentConstantPool();
12258   return false;
12259 }
12260 
parseDirectiveEven(SMLoc L)12261 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
12262   const MCSection *Section = getStreamer().getCurrentSectionOnly();
12263 
12264   if (parseEOL())
12265     return true;
12266 
12267   if (!Section) {
12268     getStreamer().initSections(false, getSTI());
12269     Section = getStreamer().getCurrentSectionOnly();
12270   }
12271 
12272   assert(Section && "must have section to emit alignment");
12273   if (Section->useCodeAlign())
12274     getStreamer().emitCodeAlignment(Align(2), &getSTI());
12275   else
12276     getStreamer().emitValueToAlignment(Align(2));
12277 
12278   return false;
12279 }
12280 
12281 /// parseDirectivePersonalityIndex
12282 ///   ::= .personalityindex index
parseDirectivePersonalityIndex(SMLoc L)12283 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
12284   MCAsmParser &Parser = getParser();
12285   bool HasExistingPersonality = UC.hasPersonality();
12286 
12287   const MCExpr *IndexExpression;
12288   SMLoc IndexLoc = Parser.getTok().getLoc();
12289   if (Parser.parseExpression(IndexExpression) || parseEOL()) {
12290     return true;
12291   }
12292 
12293   UC.recordPersonalityIndex(L);
12294 
12295   if (!UC.hasFnStart()) {
12296     return Error(L, ".fnstart must precede .personalityindex directive");
12297   }
12298   if (UC.cantUnwind()) {
12299     Error(L, ".personalityindex cannot be used with .cantunwind");
12300     UC.emitCantUnwindLocNotes();
12301     return true;
12302   }
12303   if (UC.hasHandlerData()) {
12304     Error(L, ".personalityindex must precede .handlerdata directive");
12305     UC.emitHandlerDataLocNotes();
12306     return true;
12307   }
12308   if (HasExistingPersonality) {
12309     Error(L, "multiple personality directives");
12310     UC.emitPersonalityLocNotes();
12311     return true;
12312   }
12313 
12314   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
12315   if (!CE)
12316     return Error(IndexLoc, "index must be a constant number");
12317   if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
12318     return Error(IndexLoc,
12319                  "personality routine index should be in range [0-3]");
12320 
12321   getTargetStreamer().emitPersonalityIndex(CE->getValue());
12322   return false;
12323 }
12324 
12325 /// parseDirectiveUnwindRaw
12326 ///   ::= .unwind_raw offset, opcode [, opcode...]
parseDirectiveUnwindRaw(SMLoc L)12327 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
12328   MCAsmParser &Parser = getParser();
12329   int64_t StackOffset;
12330   const MCExpr *OffsetExpr;
12331   SMLoc OffsetLoc = getLexer().getLoc();
12332 
12333   if (!UC.hasFnStart())
12334     return Error(L, ".fnstart must precede .unwind_raw directives");
12335   if (getParser().parseExpression(OffsetExpr))
12336     return Error(OffsetLoc, "expected expression");
12337 
12338   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12339   if (!CE)
12340     return Error(OffsetLoc, "offset must be a constant");
12341 
12342   StackOffset = CE->getValue();
12343 
12344   if (Parser.parseComma())
12345     return true;
12346 
12347   SmallVector<uint8_t, 16> Opcodes;
12348 
12349   auto parseOne = [&]() -> bool {
12350     const MCExpr *OE = nullptr;
12351     SMLoc OpcodeLoc = getLexer().getLoc();
12352     if (check(getLexer().is(AsmToken::EndOfStatement) ||
12353                   Parser.parseExpression(OE),
12354               OpcodeLoc, "expected opcode expression"))
12355       return true;
12356     const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
12357     if (!OC)
12358       return Error(OpcodeLoc, "opcode value must be a constant");
12359     const int64_t Opcode = OC->getValue();
12360     if (Opcode & ~0xff)
12361       return Error(OpcodeLoc, "invalid opcode");
12362     Opcodes.push_back(uint8_t(Opcode));
12363     return false;
12364   };
12365 
12366   // Must have at least 1 element
12367   SMLoc OpcodeLoc = getLexer().getLoc();
12368   if (parseOptionalToken(AsmToken::EndOfStatement))
12369     return Error(OpcodeLoc, "expected opcode expression");
12370   if (parseMany(parseOne))
12371     return true;
12372 
12373   getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
12374   return false;
12375 }
12376 
12377 /// parseDirectiveTLSDescSeq
12378 ///   ::= .tlsdescseq tls-variable
parseDirectiveTLSDescSeq(SMLoc L)12379 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12380   MCAsmParser &Parser = getParser();
12381 
12382   if (getLexer().isNot(AsmToken::Identifier))
12383     return TokError("expected variable after '.tlsdescseq' directive");
12384 
12385   const MCSymbolRefExpr *SRE =
12386     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
12387                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
12388   Lex();
12389 
12390   if (parseEOL())
12391     return true;
12392 
12393   getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12394   return false;
12395 }
12396 
12397 /// parseDirectiveMovSP
12398 ///  ::= .movsp reg [, #offset]
parseDirectiveMovSP(SMLoc L)12399 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
12400   MCAsmParser &Parser = getParser();
12401   if (!UC.hasFnStart())
12402     return Error(L, ".fnstart must precede .movsp directives");
12403   if (UC.getFPReg() != ARM::SP)
12404     return Error(L, "unexpected .movsp directive");
12405 
12406   SMLoc SPRegLoc = Parser.getTok().getLoc();
12407   int SPReg = tryParseRegister();
12408   if (SPReg == -1)
12409     return Error(SPRegLoc, "register expected");
12410   if (SPReg == ARM::SP || SPReg == ARM::PC)
12411     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
12412 
12413   int64_t Offset = 0;
12414   if (Parser.parseOptionalToken(AsmToken::Comma)) {
12415     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
12416       return true;
12417 
12418     const MCExpr *OffsetExpr;
12419     SMLoc OffsetLoc = Parser.getTok().getLoc();
12420 
12421     if (Parser.parseExpression(OffsetExpr))
12422       return Error(OffsetLoc, "malformed offset expression");
12423 
12424     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12425     if (!CE)
12426       return Error(OffsetLoc, "offset must be an immediate constant");
12427 
12428     Offset = CE->getValue();
12429   }
12430 
12431   if (parseEOL())
12432     return true;
12433 
12434   getTargetStreamer().emitMovSP(SPReg, Offset);
12435   UC.saveFPReg(SPReg);
12436 
12437   return false;
12438 }
12439 
12440 /// parseDirectiveObjectArch
12441 ///   ::= .object_arch name
parseDirectiveObjectArch(SMLoc L)12442 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12443   MCAsmParser &Parser = getParser();
12444   if (getLexer().isNot(AsmToken::Identifier))
12445     return Error(getLexer().getLoc(), "unexpected token");
12446 
12447   StringRef Arch = Parser.getTok().getString();
12448   SMLoc ArchLoc = Parser.getTok().getLoc();
12449   Lex();
12450 
12451   ARM::ArchKind ID = ARM::parseArch(Arch);
12452 
12453   if (ID == ARM::ArchKind::INVALID)
12454     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
12455   if (parseToken(AsmToken::EndOfStatement))
12456     return true;
12457 
12458   getTargetStreamer().emitObjectArch(ID);
12459   return false;
12460 }
12461 
12462 /// parseDirectiveAlign
12463 ///   ::= .align
parseDirectiveAlign(SMLoc L)12464 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
12465   // NOTE: if this is not the end of the statement, fall back to the target
12466   // agnostic handling for this directive which will correctly handle this.
12467   if (parseOptionalToken(AsmToken::EndOfStatement)) {
12468     // '.align' is target specifically handled to mean 2**2 byte alignment.
12469     const MCSection *Section = getStreamer().getCurrentSectionOnly();
12470     assert(Section && "must have section to emit alignment");
12471     if (Section->useCodeAlign())
12472       getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
12473     else
12474       getStreamer().emitValueToAlignment(Align(4), 0, 1, 0);
12475     return false;
12476   }
12477   return true;
12478 }
12479 
12480 /// parseDirectiveThumbSet
12481 ///  ::= .thumb_set name, value
parseDirectiveThumbSet(SMLoc L)12482 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
12483   MCAsmParser &Parser = getParser();
12484 
12485   StringRef Name;
12486   if (check(Parser.parseIdentifier(Name),
12487             "expected identifier after '.thumb_set'") ||
12488       Parser.parseComma())
12489     return true;
12490 
12491   MCSymbol *Sym;
12492   const MCExpr *Value;
12493   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
12494                                                Parser, Sym, Value))
12495     return true;
12496 
12497   getTargetStreamer().emitThumbSet(Sym, Value);
12498   return false;
12499 }
12500 
12501 /// parseDirectiveSEHAllocStack
12502 /// ::= .seh_stackalloc
12503 /// ::= .seh_stackalloc_w
parseDirectiveSEHAllocStack(SMLoc L,bool Wide)12504 bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12505   int64_t Size;
12506   if (parseImmExpr(Size))
12507     return true;
12508   getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12509   return false;
12510 }
12511 
12512 /// parseDirectiveSEHSaveRegs
12513 /// ::= .seh_save_regs
12514 /// ::= .seh_save_regs_w
parseDirectiveSEHSaveRegs(SMLoc L,bool Wide)12515 bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
12516   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12517 
12518   if (parseRegisterList(Operands) || parseEOL())
12519     return true;
12520   ARMOperand &Op = (ARMOperand &)*Operands[0];
12521   if (!Op.isRegList())
12522     return Error(L, ".seh_save_regs{_w} expects GPR registers");
12523   const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12524   uint32_t Mask = 0;
12525   for (size_t i = 0; i < RegList.size(); ++i) {
12526     unsigned Reg = MRI->getEncodingValue(RegList[i]);
12527     if (Reg == 15) // pc -> lr
12528       Reg = 14;
12529     if (Reg == 13)
12530       return Error(L, ".seh_save_regs{_w} can't include SP");
12531     assert(Reg < 16U && "Register out of range");
12532     unsigned Bit = (1u << Reg);
12533     Mask |= Bit;
12534   }
12535   if (!Wide && (Mask & 0x1f00) != 0)
12536     return Error(L,
12537                  ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12538   getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12539   return false;
12540 }
12541 
12542 /// parseDirectiveSEHSaveSP
12543 /// ::= .seh_save_sp
parseDirectiveSEHSaveSP(SMLoc L)12544 bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12545   int Reg = tryParseRegister();
12546   if (Reg == -1 || !MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12547     return Error(L, "expected GPR");
12548   unsigned Index = MRI->getEncodingValue(Reg);
12549   if (Index > 14 || Index == 13)
12550     return Error(L, "invalid register for .seh_save_sp");
12551   getTargetStreamer().emitARMWinCFISaveSP(Index);
12552   return false;
12553 }
12554 
12555 /// parseDirectiveSEHSaveFRegs
12556 /// ::= .seh_save_fregs
parseDirectiveSEHSaveFRegs(SMLoc L)12557 bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
12558   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12559 
12560   if (parseRegisterList(Operands) || parseEOL())
12561     return true;
12562   ARMOperand &Op = (ARMOperand &)*Operands[0];
12563   if (!Op.isDPRRegList())
12564     return Error(L, ".seh_save_fregs expects DPR registers");
12565   const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12566   uint32_t Mask = 0;
12567   for (size_t i = 0; i < RegList.size(); ++i) {
12568     unsigned Reg = MRI->getEncodingValue(RegList[i]);
12569     assert(Reg < 32U && "Register out of range");
12570     unsigned Bit = (1u << Reg);
12571     Mask |= Bit;
12572   }
12573 
12574   if (Mask == 0)
12575     return Error(L, ".seh_save_fregs missing registers");
12576 
12577   unsigned First = 0;
12578   while ((Mask & 1) == 0) {
12579     First++;
12580     Mask >>= 1;
12581   }
12582   if (((Mask + 1) & Mask) != 0)
12583     return Error(L,
12584                  ".seh_save_fregs must take a contiguous range of registers");
12585   unsigned Last = First;
12586   while ((Mask & 2) != 0) {
12587     Last++;
12588     Mask >>= 1;
12589   }
12590   if (First < 16 && Last >= 16)
12591     return Error(L, ".seh_save_fregs must be all d0-d15 or d16-d31");
12592   getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
12593   return false;
12594 }
12595 
12596 /// parseDirectiveSEHSaveLR
12597 /// ::= .seh_save_lr
parseDirectiveSEHSaveLR(SMLoc L)12598 bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12599   int64_t Offset;
12600   if (parseImmExpr(Offset))
12601     return true;
12602   getTargetStreamer().emitARMWinCFISaveLR(Offset);
12603   return false;
12604 }
12605 
12606 /// parseDirectiveSEHPrologEnd
12607 /// ::= .seh_endprologue
12608 /// ::= .seh_endprologue_fragment
parseDirectiveSEHPrologEnd(SMLoc L,bool Fragment)12609 bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
12610   getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12611   return false;
12612 }
12613 
12614 /// parseDirectiveSEHNop
12615 /// ::= .seh_nop
12616 /// ::= .seh_nop_w
parseDirectiveSEHNop(SMLoc L,bool Wide)12617 bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
12618   getTargetStreamer().emitARMWinCFINop(Wide);
12619   return false;
12620 }
12621 
12622 /// parseDirectiveSEHEpilogStart
12623 /// ::= .seh_startepilogue
12624 /// ::= .seh_startepilogue_cond
parseDirectiveSEHEpilogStart(SMLoc L,bool Condition)12625 bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
12626   unsigned CC = ARMCC::AL;
12627   if (Condition) {
12628     MCAsmParser &Parser = getParser();
12629     SMLoc S = Parser.getTok().getLoc();
12630     const AsmToken &Tok = Parser.getTok();
12631     if (!Tok.is(AsmToken::Identifier))
12632       return Error(S, ".seh_startepilogue_cond missing condition");
12633     CC = ARMCondCodeFromString(Tok.getString());
12634     if (CC == ~0U)
12635       return Error(S, "invalid condition");
12636     Parser.Lex(); // Eat the token.
12637   }
12638 
12639   getTargetStreamer().emitARMWinCFIEpilogStart(CC);
12640   return false;
12641 }
12642 
12643 /// parseDirectiveSEHEpilogEnd
12644 /// ::= .seh_endepilogue
parseDirectiveSEHEpilogEnd(SMLoc L)12645 bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
12646   getTargetStreamer().emitARMWinCFIEpilogEnd();
12647   return false;
12648 }
12649 
12650 /// parseDirectiveSEHCustom
12651 /// ::= .seh_custom
parseDirectiveSEHCustom(SMLoc L)12652 bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12653   unsigned Opcode = 0;
12654   do {
12655     int64_t Byte;
12656     if (parseImmExpr(Byte))
12657       return true;
12658     if (Byte > 0xff || Byte < 0)
12659       return Error(L, "Invalid byte value in .seh_custom");
12660     if (Opcode > 0x00ffffff)
12661       return Error(L, "Too many bytes in .seh_custom");
12662     // Store the bytes as one big endian number in Opcode. In a multi byte
12663     // opcode sequence, the first byte can't be zero.
12664     Opcode = (Opcode << 8) | Byte;
12665   } while (parseOptionalToken(AsmToken::Comma));
12666   getTargetStreamer().emitARMWinCFICustom(Opcode);
12667   return false;
12668 }
12669 
12670 /// Force static initialization.
LLVMInitializeARMAsmParser()12671 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser() {
12672   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
12673   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
12674   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
12675   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
12676 }
12677 
12678 #define GET_REGISTER_MATCHER
12679 #define GET_SUBTARGET_FEATURE_NAME
12680 #define GET_MATCHER_IMPLEMENTATION
12681 #define GET_MNEMONIC_SPELL_CHECKER
12682 #include "ARMGenAsmMatcher.inc"
12683 
12684 // Some diagnostics need to vary with subtarget features, so they are handled
12685 // here. For example, the DPR class has either 16 or 32 registers, depending
12686 // on the FPU available.
12687 const char *
getCustomOperandDiag(ARMMatchResultTy MatchError)12688 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12689   switch (MatchError) {
12690   // rGPR contains sp starting with ARMv8.
12691   case Match_rGPR:
12692     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12693                       : "operand must be a register in range [r0, r12] or r14";
12694   // DPR contains 16 registers for some FPUs, and 32 for others.
12695   case Match_DPR:
12696     return hasD32() ? "operand must be a register in range [d0, d31]"
12697                     : "operand must be a register in range [d0, d15]";
12698   case Match_DPR_RegList:
12699     return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12700                     : "operand must be a list of registers in range [d0, d15]";
12701 
12702   // For all other diags, use the static string from tablegen.
12703   default:
12704     return getMatchKindDiag(MatchError);
12705   }
12706 }
12707 
12708 // Process the list of near-misses, throwing away ones we don't want to report
12709 // to the user, and converting the rest to a source location and string that
12710 // should be reported.
12711 void
FilterNearMisses(SmallVectorImpl<NearMissInfo> & NearMissesIn,SmallVectorImpl<NearMissMessage> & NearMissesOut,SMLoc IDLoc,OperandVector & Operands)12712 ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
12713                                SmallVectorImpl<NearMissMessage> &NearMissesOut,
12714                                SMLoc IDLoc, OperandVector &Operands) {
12715   // TODO: If operand didn't match, sub in a dummy one and run target
12716   // predicate, so that we can avoid reporting near-misses that are invalid?
12717   // TODO: Many operand types dont have SuperClasses set, so we report
12718   // redundant ones.
12719   // TODO: Some operands are superclasses of registers (e.g.
12720   // MCK_RegShiftedImm), we don't have any way to represent that currently.
12721   // TODO: This is not all ARM-specific, can some of it be factored out?
12722 
12723   // Record some information about near-misses that we have already seen, so
12724   // that we can avoid reporting redundant ones. For example, if there are
12725   // variants of an instruction that take 8- and 16-bit immediates, we want
12726   // to only report the widest one.
12727   std::multimap<unsigned, unsigned> OperandMissesSeen;
12728   SmallSet<FeatureBitset, 4> FeatureMissesSeen;
12729   bool ReportedTooFewOperands = false;
12730 
12731   unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
12732 
12733   // Process the near-misses in reverse order, so that we see more general ones
12734   // first, and so can avoid emitting more specific ones.
12735   for (NearMissInfo &I : reverse(NearMissesIn)) {
12736     switch (I.getKind()) {
12737     case NearMissInfo::NearMissOperand: {
12738       SMLoc OperandLoc =
12739           ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
12740       const char *OperandDiag =
12741           getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
12742 
12743       // If we have already emitted a message for a superclass, don't also report
12744       // the sub-class. We consider all operand classes that we don't have a
12745       // specialised diagnostic for to be equal for the propose of this check,
12746       // so that we don't report the generic error multiple times on the same
12747       // operand.
12748       unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
12749       auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
12750       if (std::any_of(PrevReports.first, PrevReports.second,
12751                       [DupCheckMatchClass](
12752                           const std::pair<unsigned, unsigned> Pair) {
12753             if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12754               return Pair.second == DupCheckMatchClass;
12755             else
12756               return isSubclass((MatchClassKind)DupCheckMatchClass,
12757                                 (MatchClassKind)Pair.second);
12758           }))
12759         break;
12760       OperandMissesSeen.insert(
12761           std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
12762 
12763       NearMissMessage Message;
12764       Message.Loc = OperandLoc;
12765       if (OperandDiag) {
12766         Message.Message = OperandDiag;
12767       } else if (I.getOperandClass() == InvalidMatchClass) {
12768         Message.Message = "too many operands for instruction";
12769       } else {
12770         Message.Message = "invalid operand for instruction";
12771         LLVM_DEBUG(
12772             dbgs() << "Missing diagnostic string for operand class "
12773                    << getMatchClassName((MatchClassKind)I.getOperandClass())
12774                    << I.getOperandClass() << ", error " << I.getOperandError()
12775                    << ", opcode " << MII.getName(I.getOpcode()) << "\n");
12776       }
12777       NearMissesOut.emplace_back(Message);
12778       break;
12779     }
12780     case NearMissInfo::NearMissFeature: {
12781       const FeatureBitset &MissingFeatures = I.getFeatures();
12782       // Don't report the same set of features twice.
12783       if (FeatureMissesSeen.count(MissingFeatures))
12784         break;
12785       FeatureMissesSeen.insert(MissingFeatures);
12786 
12787       // Special case: don't report a feature set which includes arm-mode for
12788       // targets that don't have ARM mode.
12789       if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
12790         break;
12791       // Don't report any near-misses that both require switching instruction
12792       // set, and adding other subtarget features.
12793       if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
12794           MissingFeatures.count() > 1)
12795         break;
12796       if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
12797           MissingFeatures.count() > 1)
12798         break;
12799       if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
12800           (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
12801                                              Feature_IsThumbBit})).any())
12802         break;
12803       if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
12804         break;
12805 
12806       NearMissMessage Message;
12807       Message.Loc = IDLoc;
12808       raw_svector_ostream OS(Message.Message);
12809 
12810       OS << "instruction requires:";
12811       for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
12812         if (MissingFeatures.test(i))
12813           OS << ' ' << getSubtargetFeatureName(i);
12814 
12815       NearMissesOut.emplace_back(Message);
12816 
12817       break;
12818     }
12819     case NearMissInfo::NearMissPredicate: {
12820       NearMissMessage Message;
12821       Message.Loc = IDLoc;
12822       switch (I.getPredicateError()) {
12823       case Match_RequiresNotITBlock:
12824         Message.Message = "flag setting instruction only valid outside IT block";
12825         break;
12826       case Match_RequiresITBlock:
12827         Message.Message = "instruction only valid inside IT block";
12828         break;
12829       case Match_RequiresV6:
12830         Message.Message = "instruction variant requires ARMv6 or later";
12831         break;
12832       case Match_RequiresThumb2:
12833         Message.Message = "instruction variant requires Thumb2";
12834         break;
12835       case Match_RequiresV8:
12836         Message.Message = "instruction variant requires ARMv8 or later";
12837         break;
12838       case Match_RequiresFlagSetting:
12839         Message.Message = "no flag-preserving variant of this instruction available";
12840         break;
12841       case Match_InvalidTiedOperand: {
12842         ARMOperand &Op = static_cast<ARMOperand &>(*Operands[0]);
12843         if (Op.isToken() && Op.getToken() == "mul") {
12844           Message.Message = "destination register must match a source register";
12845           Message.Loc = Operands[MnemonicOpsEndInd]->getStartLoc();
12846         } else {
12847           llvm_unreachable("Match_InvalidTiedOperand only used for tMUL.");
12848         }
12849         break;
12850       }
12851       case Match_InvalidOperand:
12852         Message.Message = "invalid operand for instruction";
12853         break;
12854       default:
12855         llvm_unreachable("Unhandled target predicate error");
12856         break;
12857       }
12858       NearMissesOut.emplace_back(Message);
12859       break;
12860     }
12861     case NearMissInfo::NearMissTooFewOperands: {
12862       if (!ReportedTooFewOperands) {
12863         SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
12864         NearMissesOut.emplace_back(NearMissMessage{
12865             EndLoc, StringRef("too few operands for instruction")});
12866         ReportedTooFewOperands = true;
12867       }
12868       break;
12869     }
12870     case NearMissInfo::NoNearMiss:
12871       // This should never leave the matcher.
12872       llvm_unreachable("not a near-miss");
12873       break;
12874     }
12875   }
12876 }
12877 
ReportNearMisses(SmallVectorImpl<NearMissInfo> & NearMisses,SMLoc IDLoc,OperandVector & Operands)12878 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12879                                     SMLoc IDLoc, OperandVector &Operands) {
12880   SmallVector<NearMissMessage, 4> Messages;
12881   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12882 
12883   if (Messages.size() == 0) {
12884     // No near-misses were found, so the best we can do is "invalid
12885     // instruction".
12886     Error(IDLoc, "invalid instruction");
12887   } else if (Messages.size() == 1) {
12888     // One near miss was found, report it as the sole error.
12889     Error(Messages[0].Loc, Messages[0].Message);
12890   } else {
12891     // More than one near miss, so report a generic "invalid instruction"
12892     // error, followed by notes for each of the near-misses.
12893     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
12894     for (auto &M : Messages) {
12895       Note(M.Loc, M.Message);
12896     }
12897   }
12898 }
12899 
enableArchExtFeature(StringRef Name,SMLoc & ExtLoc)12900 bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
12901   // FIXME: This structure should be moved inside ARMTargetParser
12902   // when we start to table-generate them, and we can use the ARM
12903   // flags below, that were generated by table-gen.
12904   static const struct {
12905     const uint64_t Kind;
12906     const FeatureBitset ArchCheck;
12907     const FeatureBitset Features;
12908   } Extensions[] = {
12909       {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12910       {ARM::AEK_AES,
12911        {Feature_HasV8Bit},
12912        {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12913       {ARM::AEK_SHA2,
12914        {Feature_HasV8Bit},
12915        {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12916       {ARM::AEK_CRYPTO,
12917        {Feature_HasV8Bit},
12918        {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12919       {(ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP),
12920        {Feature_HasV8_1MMainlineBit},
12921        {ARM::HasMVEFloatOps}},
12922       {ARM::AEK_FP,
12923        {Feature_HasV8Bit},
12924        {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12925       {(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
12926        {Feature_HasV7Bit, Feature_IsNotMClassBit},
12927        {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12928       {ARM::AEK_MP,
12929        {Feature_HasV7Bit, Feature_IsNotMClassBit},
12930        {ARM::FeatureMP}},
12931       {ARM::AEK_SIMD,
12932        {Feature_HasV8Bit},
12933        {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12934       {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12935       // FIXME: Only available in A-class, isel not predicated
12936       {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12937       {ARM::AEK_FP16,
12938        {Feature_HasV8_2aBit},
12939        {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12940       {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12941       {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12942       {ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12943       // FIXME: Unsupported extensions.
12944       {ARM::AEK_OS, {}, {}},
12945       {ARM::AEK_IWMMXT, {}, {}},
12946       {ARM::AEK_IWMMXT2, {}, {}},
12947       {ARM::AEK_MAVERICK, {}, {}},
12948       {ARM::AEK_XSCALE, {}, {}},
12949   };
12950   bool EnableFeature = !Name.consume_front_insensitive("no");
12951   uint64_t FeatureKind = ARM::parseArchExt(Name);
12952   if (FeatureKind == ARM::AEK_INVALID)
12953     return Error(ExtLoc, "unknown architectural extension: " + Name);
12954 
12955   for (const auto &Extension : Extensions) {
12956     if (Extension.Kind != FeatureKind)
12957       continue;
12958 
12959     if (Extension.Features.none())
12960       return Error(ExtLoc, "unsupported architectural extension: " + Name);
12961 
12962     if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
12963       return Error(ExtLoc, "architectural extension '" + Name +
12964                                "' is not "
12965                                "allowed for the current base architecture");
12966 
12967     MCSubtargetInfo &STI = copySTI();
12968     if (EnableFeature) {
12969       STI.SetFeatureBitsTransitively(Extension.Features);
12970     } else {
12971       STI.ClearFeatureBitsTransitively(Extension.Features);
12972     }
12973     FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
12974     setAvailableFeatures(Features);
12975     return true;
12976   }
12977   return false;
12978 }
12979 
12980 /// parseDirectiveArchExtension
12981 ///   ::= .arch_extension [no]feature
parseDirectiveArchExtension(SMLoc L)12982 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
12983 
12984   MCAsmParser &Parser = getParser();
12985 
12986   if (getLexer().isNot(AsmToken::Identifier))
12987     return Error(getLexer().getLoc(), "expected architecture extension name");
12988 
12989   StringRef Name = Parser.getTok().getString();
12990   SMLoc ExtLoc = Parser.getTok().getLoc();
12991   Lex();
12992 
12993   if (parseEOL())
12994     return true;
12995 
12996   if (Name == "nocrypto") {
12997     enableArchExtFeature("nosha2", ExtLoc);
12998     enableArchExtFeature("noaes", ExtLoc);
12999   }
13000 
13001   if (enableArchExtFeature(Name, ExtLoc))
13002     return false;
13003 
13004   return Error(ExtLoc, "unknown architectural extension: " + Name);
13005 }
13006 
13007 // Define this matcher function after the auto-generated include so we
13008 // have the match class enum definitions.
validateTargetOperandClass(MCParsedAsmOperand & AsmOp,unsigned Kind)13009 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
13010                                                   unsigned Kind) {
13011   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
13012   // If the kind is a token for a literal immediate, check if our asm
13013   // operand matches. This is for InstAliases which have a fixed-value
13014   // immediate in the syntax.
13015   switch (Kind) {
13016   default: break;
13017   case MCK__HASH_0:
13018     if (Op.isImm())
13019       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
13020         if (CE->getValue() == 0)
13021           return Match_Success;
13022     break;
13023   case MCK__HASH_8:
13024     if (Op.isImm())
13025       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
13026         if (CE->getValue() == 8)
13027           return Match_Success;
13028     break;
13029   case MCK__HASH_16:
13030     if (Op.isImm())
13031       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
13032         if (CE->getValue() == 16)
13033           return Match_Success;
13034     break;
13035   case MCK_ModImm:
13036     if (Op.isImm()) {
13037       const MCExpr *SOExpr = Op.getImm();
13038       int64_t Value;
13039       if (!SOExpr->evaluateAsAbsolute(Value))
13040         return Match_Success;
13041       assert((Value >= std::numeric_limits<int32_t>::min() &&
13042               Value <= std::numeric_limits<uint32_t>::max()) &&
13043              "expression value must be representable in 32 bits");
13044     }
13045     break;
13046   case MCK_rGPR:
13047     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
13048       return Match_Success;
13049     return Match_rGPR;
13050   }
13051   return Match_InvalidOperand;
13052 }
13053 
isMnemonicVPTPredicable(StringRef Mnemonic,StringRef ExtraToken)13054 bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
13055                                            StringRef ExtraToken) {
13056   if (!hasMVE())
13057     return false;
13058 
13059   if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
13060       (Mnemonic.starts_with("vldrh") && Mnemonic != "vldrhi") ||
13061       (Mnemonic.starts_with("vmov") &&
13062        !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
13063          ExtraToken == ".8")) ||
13064       (Mnemonic.starts_with("vrint") && Mnemonic != "vrintr") ||
13065       (Mnemonic.starts_with("vstrh") && Mnemonic != "vstrhi"))
13066     return true;
13067 
13068   const char *predicable_prefixes[] = {
13069       "vabav",      "vabd",     "vabs",      "vadc",       "vadd",
13070       "vaddlv",     "vaddv",    "vand",      "vbic",       "vbrsr",
13071       "vcadd",      "vcls",     "vclz",      "vcmla",      "vcmp",
13072       "vcmul",      "vctp",     "vcvt",      "vddup",      "vdup",
13073       "vdwdup",     "veor",     "vfma",      "vfmas",      "vfms",
13074       "vhadd",      "vhcadd",   "vhsub",     "vidup",      "viwdup",
13075       "vldrb",      "vldrd",    "vldrw",     "vmax",       "vmaxa",
13076       "vmaxav",     "vmaxnm",   "vmaxnma",   "vmaxnmav",   "vmaxnmv",
13077       "vmaxv",      "vmin",     "vminav",    "vminnm",     "vminnmav",
13078       "vminnmv",    "vminv",    "vmla",      "vmladav",    "vmlaldav",
13079       "vmlalv",     "vmlas",    "vmlav",     "vmlsdav",    "vmlsldav",
13080       "vmovlb",     "vmovlt",   "vmovnb",    "vmovnt",     "vmul",
13081       "vmvn",       "vneg",     "vorn",      "vorr",       "vpnot",
13082       "vpsel",      "vqabs",    "vqadd",     "vqdmladh",   "vqdmlah",
13083       "vqdmlash",   "vqdmlsdh", "vqdmulh",   "vqdmull",    "vqmovn",
13084       "vqmovun",    "vqneg",    "vqrdmladh", "vqrdmlah",   "vqrdmlash",
13085       "vqrdmlsdh",  "vqrdmulh", "vqrshl",    "vqrshrn",    "vqrshrun",
13086       "vqshl",      "vqshrn",   "vqshrun",   "vqsub",      "vrev16",
13087       "vrev32",     "vrev64",   "vrhadd",    "vrmlaldavh", "vrmlalvh",
13088       "vrmlsldavh", "vrmulh",   "vrshl",     "vrshr",      "vrshrn",
13089       "vsbc",       "vshl",     "vshlc",     "vshll",      "vshr",
13090       "vshrn",      "vsli",     "vsri",      "vstrb",      "vstrd",
13091       "vstrw",      "vsub"};
13092 
13093   return std::any_of(
13094       std::begin(predicable_prefixes), std::end(predicable_prefixes),
13095       [&Mnemonic](const char *prefix) { return Mnemonic.starts_with(prefix); });
13096 }
13097 
defaultCondCodeOp()13098 std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
13099   return ARMOperand::CreateCondCode(ARMCC::AL, SMLoc(), *this);
13100 }
13101 
defaultCCOutOp()13102 std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
13103   return ARMOperand::CreateCCOut(0, SMLoc(), *this);
13104 }
13105 
defaultVPTPredOp()13106 std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
13107   return ARMOperand::CreateVPTPred(ARMVCC::None, SMLoc(), *this);
13108 }
13109