xref: /freebsd/contrib/llvm-project/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp (revision 1323ec571215a77ddd21294f0871979d5ad6b992)
1 //===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/X86BaseInfo.h"
10 #include "MCTargetDesc/X86IntelInstPrinter.h"
11 #include "MCTargetDesc/X86MCExpr.h"
12 #include "MCTargetDesc/X86TargetStreamer.h"
13 #include "TargetInfo/X86TargetInfo.h"
14 #include "X86AsmParserCommon.h"
15 #include "X86Operand.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallString.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/MC/MCContext.h"
22 #include "llvm/MC/MCExpr.h"
23 #include "llvm/MC/MCInst.h"
24 #include "llvm/MC/MCInstrInfo.h"
25 #include "llvm/MC/MCParser/MCAsmLexer.h"
26 #include "llvm/MC/MCParser/MCAsmParser.h"
27 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
28 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
29 #include "llvm/MC/MCRegisterInfo.h"
30 #include "llvm/MC/MCSection.h"
31 #include "llvm/MC/MCStreamer.h"
32 #include "llvm/MC/MCSubtargetInfo.h"
33 #include "llvm/MC/MCSymbol.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Compiler.h"
36 #include "llvm/Support/SourceMgr.h"
37 #include "llvm/Support/TargetRegistry.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include <algorithm>
40 #include <memory>
41 
42 using namespace llvm;
43 
// Command-line flag enabling hardening of inline-assembly against Load Value
// Injection (LVI), an experimental Spectre-class mitigation.  Off by default
// and hidden from the standard -help listing.
static cl::opt<bool> LVIInlineAsmHardening(
    "x86-experimental-lvi-inline-asm-hardening",
    cl::desc("Harden inline assembly code that may be vulnerable to Load Value"
             " Injection (LVI). This feature is experimental."), cl::Hidden);
48 
49 static bool checkScale(unsigned Scale, StringRef &ErrMsg) {
50   if (Scale != 1 && Scale != 2 && Scale != 4 && Scale != 8) {
51     ErrMsg = "scale factor in address must be 1, 2, 4 or 8";
52     return true;
53   }
54   return false;
55 }
56 
57 namespace {
58 
// Binding strength for each infix operator, indexed by InfixCalculatorTok.
// NOTE: this table's order must stay in sync with the InfixCalculatorTok
// enumerators; higher value = tighter binding.
static const char OpPrecedence[] = {
    0,  // IC_OR
    1,  // IC_XOR
    2,  // IC_AND
    4,  // IC_LSHIFT
    4,  // IC_RSHIFT
    5,  // IC_PLUS
    5,  // IC_MINUS
    6,  // IC_MULTIPLY
    6,  // IC_DIVIDE
    6,  // IC_MOD
    7,  // IC_NOT
    8,  // IC_NEG
    9,  // IC_RPAREN
    10, // IC_LPAREN
    0,  // IC_IMM
    0,  // IC_REGISTER
    3,  // IC_EQ
    3,  // IC_NE
    3,  // IC_LT
    3,  // IC_LE
    3,  // IC_GT
    3   // IC_GE
};
83 
// Target-specific assembly parser for X86/X86-64 (AT&T and Intel syntax).
class X86AsmParser : public MCTargetAsmParser {
  ParseInstructionInfo *InstInfo; // info for the instruction being parsed
  bool Code16GCC; // .code16gcc mode: emit 16-bit code but match as 32-bit
  unsigned ForcedDataPrefix = 0; // NOTE(review): presumably a forced data-size prefix opcode; set elsewhere in this file

  // Explicitly requested instruction encoding — presumably driven by the
  // {vex}/{vex2}/{vex3}/{evex} pseudo-prefixes (confirm in ParseInstruction).
  enum VEXEncoding {
    VEXEncoding_Default,
    VEXEncoding_VEX,
    VEXEncoding_VEX2,
    VEXEncoding_VEX3,
    VEXEncoding_EVEX,
  };

  VEXEncoding ForcedVEXEncoding = VEXEncoding_Default;

  // Explicitly requested displacement width — presumably driven by the
  // {disp8}/{disp32} pseudo-prefixes (confirm in ParseInstruction).
  enum DispEncoding {
    DispEncoding_Default,
    DispEncoding_Disp8,
    DispEncoding_Disp32,
  };

  DispEncoding ForcedDispEncoding = DispEncoding_Default;
106 
107 private:
108   SMLoc consumeToken() {
109     MCAsmParser &Parser = getParser();
110     SMLoc Result = Parser.getTok().getLoc();
111     Parser.Lex();
112     return Result;
113   }
114 
115   X86TargetStreamer &getTargetStreamer() {
116     assert(getParser().getStreamer().getTargetStreamer() &&
117            "do not have a target streamer");
118     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
119     return static_cast<X86TargetStreamer &>(TS);
120   }
121 
  // Thin wrapper around the tablegen'd MatchInstructionImpl that temporarily
  // flips the subtarget into 32-bit mode while matching under .code16gcc,
  // restoring 16-bit mode afterwards.
  unsigned MatchInstruction(const OperandVector &Operands, MCInst &Inst,
                            uint64_t &ErrorInfo, FeatureBitset &MissingFeatures,
                            bool matchingInlineAsm, unsigned VariantID = 0) {
    // In Code16GCC mode, match as 32-bit.
    if (Code16GCC)
      SwitchMode(X86::Mode32Bit);
    unsigned rv = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                       MissingFeatures, matchingInlineAsm,
                                       VariantID);
    if (Code16GCC)
      SwitchMode(X86::Mode16Bit);
    return rv;
  }
135 
  // Token kinds consumed by InfixCalculator.  NOTE: the enumerator order must
  // mirror the OpPrecedence[] table above, which is indexed by these values.
  enum InfixCalculatorTok {
    IC_OR = 0,
    IC_XOR,
    IC_AND,
    IC_LSHIFT,
    IC_RSHIFT,
    IC_PLUS,
    IC_MINUS,
    IC_MULTIPLY,
    IC_DIVIDE,
    IC_MOD,
    IC_NOT,
    IC_NEG,
    IC_RPAREN,
    IC_LPAREN,
    IC_IMM,
    IC_REGISTER,
    IC_EQ,
    IC_NE,
    IC_LT,
    IC_LE,
    IC_GT,
    IC_GE
  };
160 
  // Intel-syntax operators applied to identifiers (LENGTH x, SIZE x, TYPE x).
  enum IntelOperatorKind {
    IOK_INVALID = 0,
    IOK_LENGTH,
    IOK_SIZE,
    IOK_TYPE,
  };
167 
  // MASM-dialect counterparts of the above (LENGTHOF x, SIZEOF x, TYPE x).
  enum MasmOperatorKind {
    MOK_INVALID = 0,
    MOK_LENGTHOF,
    MOK_SIZEOF,
    MOK_TYPE,
  };
174 
  // Evaluates constant Intel-syntax arithmetic expressions.  Operators and
  // operands are fed in infix order: pushOperator() performs a shunting-yard
  // conversion onto PostfixStack, and execute() evaluates the resulting
  // postfix sequence to a single 64-bit value.
  class InfixCalculator {
    typedef std::pair< InfixCalculatorTok, int64_t > ICToken;
    SmallVector<InfixCalculatorTok, 4> InfixOperatorStack; // pending operators
    SmallVector<ICToken, 4> PostfixStack; // output in postfix (RPN) order

    // Only negate and bitwise-not take a single operand.
    bool isUnaryOperator(InfixCalculatorTok Op) const {
      return Op == IC_NEG || Op == IC_NOT;
    }

  public:
    // Pop the most recently pushed operand value.  If the top of the postfix
    // stack is not an operand, returns -1, which is an invalid scale that
    // checkScale() will reject later.
    int64_t popOperand() {
      assert (!PostfixStack.empty() && "Poped an empty stack!");
      ICToken Op = PostfixStack.pop_back_val();
      if (!(Op.first == IC_IMM || Op.first == IC_REGISTER))
        return -1; // The invalid Scale value will be caught later by checkScale
      return Op.second;
    }
    // Push an immediate or register operand straight onto the postfix output.
    void pushOperand(InfixCalculatorTok Op, int64_t Val = 0) {
      assert ((Op == IC_IMM || Op == IC_REGISTER) &&
              "Unexpected operand!");
      PostfixStack.push_back(std::make_pair(Op, Val));
    }

    void popOperator() { InfixOperatorStack.pop_back(); }
    // Shunting-yard step: move operators of greater-or-equal precedence from
    // the operator stack to the postfix output, then push the new operator.
    void pushOperator(InfixCalculatorTok Op) {
      // Push the new operator if the stack is empty.
      if (InfixOperatorStack.empty()) {
        InfixOperatorStack.push_back(Op);
        return;
      }

      // Push the new operator if it has a higher precedence than the operator
      // on the top of the stack or the operator on the top of the stack is a
      // left parentheses.
      unsigned Idx = InfixOperatorStack.size() - 1;
      InfixCalculatorTok StackOp = InfixOperatorStack[Idx];
      if (OpPrecedence[Op] > OpPrecedence[StackOp] || StackOp == IC_LPAREN) {
        InfixOperatorStack.push_back(Op);
        return;
      }

      // The operator on the top of the stack has higher precedence than the
      // new operator.
      unsigned ParenCount = 0;
      while (1) {
        // Nothing to process.
        if (InfixOperatorStack.empty())
          break;

        Idx = InfixOperatorStack.size() - 1;
        StackOp = InfixOperatorStack[Idx];
        if (!(OpPrecedence[StackOp] >= OpPrecedence[Op] || ParenCount))
          break;

        // If we have an even parentheses count and we see a left parentheses,
        // then stop processing.
        if (!ParenCount && StackOp == IC_LPAREN)
          break;

        if (StackOp == IC_RPAREN) {
          ++ParenCount;
          InfixOperatorStack.pop_back();
        } else if (StackOp == IC_LPAREN) {
          --ParenCount;
          InfixOperatorStack.pop_back();
        } else {
          InfixOperatorStack.pop_back();
          PostfixStack.push_back(std::make_pair(StackOp, 0));
        }
      }
      // Push the new operator.
      InfixOperatorStack.push_back(Op);
    }

    // Flush the remaining operators to the postfix output and evaluate it.
    // Comparison operators yield -1 for true and 0 for false (MASM-style
    // all-ones truth value).  NOTE: drains InfixOperatorStack, so this is not
    // idempotent.
    int64_t execute() {
      // Push any remaining operators onto the postfix stack.
      while (!InfixOperatorStack.empty()) {
        InfixCalculatorTok StackOp = InfixOperatorStack.pop_back_val();
        if (StackOp != IC_LPAREN && StackOp != IC_RPAREN)
          PostfixStack.push_back(std::make_pair(StackOp, 0));
      }

      if (PostfixStack.empty())
        return 0;

      // Standard postfix evaluation over an operand stack.
      SmallVector<ICToken, 16> OperandStack;
      for (unsigned i = 0, e = PostfixStack.size(); i != e; ++i) {
        ICToken Op = PostfixStack[i];
        if (Op.first == IC_IMM || Op.first == IC_REGISTER) {
          OperandStack.push_back(Op);
        } else if (isUnaryOperator(Op.first)) {
          assert (OperandStack.size() > 0 && "Too few operands.");
          ICToken Operand = OperandStack.pop_back_val();
          assert (Operand.first == IC_IMM &&
                  "Unary operation with a register!");
          switch (Op.first) {
          default:
            report_fatal_error("Unexpected operator!");
            break;
          case IC_NEG:
            OperandStack.push_back(std::make_pair(IC_IMM, -Operand.second));
            break;
          case IC_NOT:
            OperandStack.push_back(std::make_pair(IC_IMM, ~Operand.second));
            break;
          }
        } else {
          assert (OperandStack.size() > 1 && "Too few operands.");
          int64_t Val;
          ICToken Op2 = OperandStack.pop_back_val();
          ICToken Op1 = OperandStack.pop_back_val();
          switch (Op.first) {
          default:
            report_fatal_error("Unexpected operator!");
            break;
          case IC_PLUS:
            Val = Op1.second + Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_MINUS:
            Val = Op1.second - Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_MULTIPLY:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Multiply operation with an immediate and a register!");
            Val = Op1.second * Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_DIVIDE:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Divide operation with an immediate and a register!");
            assert (Op2.second != 0 && "Division by zero!");
            Val = Op1.second / Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_MOD:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Modulo operation with an immediate and a register!");
            Val = Op1.second % Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_OR:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Or operation with an immediate and a register!");
            Val = Op1.second | Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_XOR:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
              "Xor operation with an immediate and a register!");
            Val = Op1.second ^ Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_AND:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "And operation with an immediate and a register!");
            Val = Op1.second & Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_LSHIFT:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Left shift operation with an immediate and a register!");
            Val = Op1.second << Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_RSHIFT:
            assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
                    "Right shift operation with an immediate and a register!");
            Val = Op1.second >> Op2.second;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_EQ:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Equals operation with an immediate and a register!");
            Val = (Op1.second == Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_NE:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Not-equals operation with an immediate and a register!");
            Val = (Op1.second != Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_LT:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than operation with an immediate and a register!");
            Val = (Op1.second < Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_LE:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Less-than-or-equal operation with an immediate and a "
                   "register!");
            Val = (Op1.second <= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_GT:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than operation with an immediate and a register!");
            Val = (Op1.second > Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          case IC_GE:
            assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
                   "Greater-than-or-equal operation with an immediate and a "
                   "register!");
            Val = (Op1.second >= Op2.second) ? -1 : 0;
            OperandStack.push_back(std::make_pair(IC_IMM, Val));
            break;
          }
        }
      }
      assert (OperandStack.size() == 1 && "Expected a single result.");
      return OperandStack.pop_back_val().second;
    }
  };
392 
  // States of the Intel-expression parser state machine; each state roughly
  // names the kind of token most recently consumed.  IES_ERROR is sticky:
  // once entered, every subsequent transition stays there.
  enum IntelExprState {
    IES_INIT,
    IES_OR,
    IES_XOR,
    IES_AND,
    IES_EQ,
    IES_NE,
    IES_LT,
    IES_LE,
    IES_GT,
    IES_GE,
    IES_LSHIFT,
    IES_RSHIFT,
    IES_PLUS,
    IES_MINUS,
    IES_OFFSET,
    IES_CAST,
    IES_NOT,
    IES_MULTIPLY,
    IES_DIVIDE,
    IES_MOD,
    IES_LBRAC,
    IES_RBRAC,
    IES_LPAREN,
    IES_RPAREN,
    IES_REGISTER,
    IES_INTEGER,
    IES_IDENTIFIER,
    IES_ERROR
  };
423 
  // State machine that drives parsing of an Intel-syntax expression or memory
  // operand, accumulating base/index/scale/displacement/symbol components as
  // tokens are consumed via the on*() callbacks.
  class IntelExprStateMachine {
    IntelExprState State, PrevState; // current and previous machine state
    unsigned BaseReg, IndexReg, TmpReg, Scale; // memory-operand pieces;
                                               // TmpReg holds the last
                                               // register seen
    int64_t Imm; // accumulated immediate displacement (see addImm/getImm)
    const MCExpr *Sym; // at most one symbol reference (enforced by setSymRef)
    StringRef SymName;
    InfixCalculator IC; // evaluates the constant part of the expression
    InlineAsmIdentifierInfo Info; // identifier info for MS inline asm
    short BracCount; // '['..']' depth; onLBrac rejects nesting beyond one
    bool MemExpr; // expression denotes a memory reference
    bool OffsetOperator; // an OFFSET operator was parsed
    SMLoc OffsetOperatorLoc; // location of that OFFSET, for diagnostics
    AsmTypeInfo CurType; // type info (name/size/element size/length)
437 
438     bool setSymRef(const MCExpr *Val, StringRef ID, StringRef &ErrMsg) {
439       if (Sym) {
440         ErrMsg = "cannot use more than one symbol in memory operand";
441         return true;
442       }
443       Sym = Val;
444       SymName = ID;
445       return false;
446     }
447 
448   public:
    // Starts in IES_INIT with no registers, immediate, or symbol recorded.
    IntelExprStateMachine()
        : State(IES_INIT), PrevState(IES_ERROR), BaseReg(0), IndexReg(0),
          TmpReg(0), Scale(0), Imm(0), Sym(nullptr), BracCount(0),
          MemExpr(false), OffsetOperator(false) {}

    void addImm(int64_t imm) { Imm += imm; }
    short getBracCount() const { return BracCount; }
    bool isMemExpr() const { return MemExpr; }
    bool isOffsetOperator() const { return OffsetOperator; }
    SMLoc getOffsetLoc() const { return OffsetOperatorLoc; }
    unsigned getBaseReg() const { return BaseReg; }
    unsigned getIndexReg() const { return IndexReg; }
    unsigned getScale() const { return Scale; }
    const MCExpr *getSym() const { return Sym; }
    StringRef getSymName() const { return SymName; }
    StringRef getType() const { return CurType.Name; }
    unsigned getSize() const { return CurType.Size; }
    unsigned getElementSize() const { return CurType.ElementSize; }
    unsigned getLength() const { return CurType.Length; }
    // NOTE: not idempotent — IC.execute() drains the pending operator stack,
    // so call getImm() only once per parsed expression.
    int64_t getImm() { return Imm + IC.execute(); }
    // A well-formed expression must end on ']' or an integer/constant.
    bool isValidEndState() const {
      return State == IES_RBRAC || State == IES_INTEGER;
    }
    bool hadError() const { return State == IES_ERROR; }
    const InlineAsmIdentifierInfo &getIdentifierInfo() const { return Info; }
474 
475     void onOr() {
476       IntelExprState CurrState = State;
477       switch (State) {
478       default:
479         State = IES_ERROR;
480         break;
481       case IES_INTEGER:
482       case IES_RPAREN:
483       case IES_REGISTER:
484         State = IES_OR;
485         IC.pushOperator(IC_OR);
486         break;
487       }
488       PrevState = CurrState;
489     }
490     void onXor() {
491       IntelExprState CurrState = State;
492       switch (State) {
493       default:
494         State = IES_ERROR;
495         break;
496       case IES_INTEGER:
497       case IES_RPAREN:
498       case IES_REGISTER:
499         State = IES_XOR;
500         IC.pushOperator(IC_XOR);
501         break;
502       }
503       PrevState = CurrState;
504     }
505     void onAnd() {
506       IntelExprState CurrState = State;
507       switch (State) {
508       default:
509         State = IES_ERROR;
510         break;
511       case IES_INTEGER:
512       case IES_RPAREN:
513       case IES_REGISTER:
514         State = IES_AND;
515         IC.pushOperator(IC_AND);
516         break;
517       }
518       PrevState = CurrState;
519     }
520     void onEq() {
521       IntelExprState CurrState = State;
522       switch (State) {
523       default:
524         State = IES_ERROR;
525         break;
526       case IES_INTEGER:
527       case IES_RPAREN:
528       case IES_REGISTER:
529         State = IES_EQ;
530         IC.pushOperator(IC_EQ);
531         break;
532       }
533       PrevState = CurrState;
534     }
535     void onNE() {
536       IntelExprState CurrState = State;
537       switch (State) {
538       default:
539         State = IES_ERROR;
540         break;
541       case IES_INTEGER:
542       case IES_RPAREN:
543       case IES_REGISTER:
544         State = IES_NE;
545         IC.pushOperator(IC_NE);
546         break;
547       }
548       PrevState = CurrState;
549     }
550     void onLT() {
551       IntelExprState CurrState = State;
552       switch (State) {
553       default:
554         State = IES_ERROR;
555         break;
556       case IES_INTEGER:
557       case IES_RPAREN:
558       case IES_REGISTER:
559         State = IES_LT;
560         IC.pushOperator(IC_LT);
561         break;
562       }
563       PrevState = CurrState;
564     }
565     void onLE() {
566       IntelExprState CurrState = State;
567       switch (State) {
568       default:
569         State = IES_ERROR;
570         break;
571       case IES_INTEGER:
572       case IES_RPAREN:
573       case IES_REGISTER:
574         State = IES_LE;
575         IC.pushOperator(IC_LE);
576         break;
577       }
578       PrevState = CurrState;
579     }
580     void onGT() {
581       IntelExprState CurrState = State;
582       switch (State) {
583       default:
584         State = IES_ERROR;
585         break;
586       case IES_INTEGER:
587       case IES_RPAREN:
588       case IES_REGISTER:
589         State = IES_GT;
590         IC.pushOperator(IC_GT);
591         break;
592       }
593       PrevState = CurrState;
594     }
595     void onGE() {
596       IntelExprState CurrState = State;
597       switch (State) {
598       default:
599         State = IES_ERROR;
600         break;
601       case IES_INTEGER:
602       case IES_RPAREN:
603       case IES_REGISTER:
604         State = IES_GE;
605         IC.pushOperator(IC_GE);
606         break;
607       }
608       PrevState = CurrState;
609     }
610     void onLShift() {
611       IntelExprState CurrState = State;
612       switch (State) {
613       default:
614         State = IES_ERROR;
615         break;
616       case IES_INTEGER:
617       case IES_RPAREN:
618       case IES_REGISTER:
619         State = IES_LSHIFT;
620         IC.pushOperator(IC_LSHIFT);
621         break;
622       }
623       PrevState = CurrState;
624     }
625     void onRShift() {
626       IntelExprState CurrState = State;
627       switch (State) {
628       default:
629         State = IES_ERROR;
630         break;
631       case IES_INTEGER:
632       case IES_RPAREN:
633       case IES_REGISTER:
634         State = IES_RSHIFT;
635         IC.pushOperator(IC_RSHIFT);
636         break;
637       }
638       PrevState = CurrState;
639     }
    // Binary '+'.  When the left operand was a bare register (not part of a
    // 'Scale * Reg' product), fold it into the memory operand as the base
    // register, or as the index register if a base is already set.  Returns
    // true with ErrMsg set on error.
    bool onPlus(StringRef &ErrMsg) {
      IntelExprState CurrState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_INTEGER:
      case IES_RPAREN:
      case IES_REGISTER:
      case IES_OFFSET:
        State = IES_PLUS;
        IC.pushOperator(IC_PLUS);
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
          // If we already have a BaseReg, then assume this is the IndexReg with
          // no explicit scale.
          if (!BaseReg) {
            BaseReg = TmpReg;
          } else {
            if (IndexReg) {
              ErrMsg = "BaseReg/IndexReg already set!";
              return true;
            }
            IndexReg = TmpReg;
            Scale = 0;
          }
        }
        break;
      }
      PrevState = CurrState;
      return false;
    }
    // '-' token: binary subtraction when it follows a completed subexpression,
    // otherwise unary negation.  Like onPlus, a preceding bare register is
    // folded into base/index.  Returns true with ErrMsg set on error.
    bool onMinus(StringRef &ErrMsg) {
      IntelExprState CurrState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_OR:
      case IES_XOR:
      case IES_AND:
      case IES_EQ:
      case IES_NE:
      case IES_LT:
      case IES_LE:
      case IES_GT:
      case IES_GE:
      case IES_LSHIFT:
      case IES_RSHIFT:
      case IES_PLUS:
      case IES_NOT:
      case IES_MULTIPLY:
      case IES_DIVIDE:
      case IES_MOD:
      case IES_LPAREN:
      case IES_RPAREN:
      case IES_LBRAC:
      case IES_RBRAC:
      case IES_INTEGER:
      case IES_REGISTER:
      case IES_INIT:
      case IES_OFFSET:
        State = IES_MINUS;
        // push minus operator if it is not a negate operator
        if (CurrState == IES_REGISTER || CurrState == IES_RPAREN ||
            CurrState == IES_INTEGER  || CurrState == IES_RBRAC  ||
            CurrState == IES_OFFSET)
          IC.pushOperator(IC_MINUS);
        else if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
          // We have negate operator for Scale: it's illegal
          ErrMsg = "Scale can't be negative";
          return true;
        } else
          IC.pushOperator(IC_NEG);
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
          // If we already have a BaseReg, then assume this is the IndexReg with
          // no explicit scale.
          if (!BaseReg) {
            BaseReg = TmpReg;
          } else {
            if (IndexReg) {
              ErrMsg = "BaseReg/IndexReg already set!";
              return true;
            }
            IndexReg = TmpReg;
            Scale = 0;
          }
        }
        break;
      }
      PrevState = CurrState;
      return false;
    }
    // Unary bitwise NOT: legal in any position where the start of an operand
    // is legal (i.e. after an operator, an opening bracket, or at the start).
    void onNot() {
      IntelExprState CurrState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_OR:
      case IES_XOR:
      case IES_AND:
      case IES_EQ:
      case IES_NE:
      case IES_LT:
      case IES_LE:
      case IES_GT:
      case IES_GE:
      case IES_LSHIFT:
      case IES_RSHIFT:
      case IES_PLUS:
      case IES_MINUS:
      case IES_NOT:
      case IES_MULTIPLY:
      case IES_DIVIDE:
      case IES_MOD:
      case IES_LPAREN:
      case IES_LBRAC:
      case IES_INIT:
        State = IES_NOT;
        IC.pushOperator(IC_NOT);
        break;
      }
      PrevState = CurrState;
    }
    // Register token.  In operand position it is stashed in TmpReg (a later
    // '+'/'-'/']' decides base vs. index); after 'Scale *' it completes a
    // scaled index register.  Returns true with ErrMsg set on error.
    bool onRegister(unsigned Reg, StringRef &ErrMsg) {
      IntelExprState CurrState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_PLUS:
      case IES_LPAREN:
      case IES_LBRAC:
        State = IES_REGISTER;
        TmpReg = Reg;
        IC.pushOperand(IC_REGISTER);
        break;
      case IES_MULTIPLY:
        // Index Register - Scale * Register
        if (PrevState == IES_INTEGER) {
          if (IndexReg) {
            ErrMsg = "BaseReg/IndexReg already set!";
            return true;
          }
          State = IES_REGISTER;
          IndexReg = Reg;
          // Get the scale and replace the 'Scale * Register' with '0'.
          Scale = IC.popOperand();
          if (checkScale(Scale, ErrMsg))
            return true;
          IC.pushOperand(IC_IMM);
          IC.popOperator();
        } else {
          State = IES_ERROR;
        }
        break;
      }
      PrevState = CurrState;
      return false;
    }
    // Identifier token.  Enum values (MS inline asm) and symbolic constants
    // are folded as integers; any other identifier becomes the expression's
    // single symbol reference and marks this a memory expression.  Returns
    // true with ErrMsg set on error.
    bool onIdentifierExpr(const MCExpr *SymRef, StringRef SymRefName,
                          const InlineAsmIdentifierInfo &IDInfo,
                          const AsmTypeInfo &Type, bool ParsingMSInlineAsm,
                          StringRef &ErrMsg) {
      // InlineAsm: Treat an enum value as an integer
      if (ParsingMSInlineAsm)
        if (IDInfo.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
          return onInteger(IDInfo.Enum.EnumVal, ErrMsg);
      // Treat a symbolic constant like an integer
      if (auto *CE = dyn_cast<MCConstantExpr>(SymRef))
        return onInteger(CE->getValue(), ErrMsg);
      PrevState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_CAST:
      case IES_PLUS:
      case IES_MINUS:
      case IES_NOT:
      case IES_INIT:
      case IES_LBRAC:
      case IES_LPAREN:
        if (setSymRef(SymRef, SymRefName, ErrMsg))
          return true;
        MemExpr = true;
        State = IES_INTEGER;
        // The symbol participates in the constant expression as a placeholder
        // immediate; its value is materialized later via relocation.
        IC.pushOperand(IC_IMM);
        if (ParsingMSInlineAsm)
          Info = IDInfo;
        setTypeInfo(Type);
        break;
      }
      return false;
    }
    // Integer literal.  If it completes a 'Register * Scale' product it is
    // consumed as the index scale (validated by checkScale); otherwise it is
    // pushed as an immediate operand.  Returns true with ErrMsg set on error.
    bool onInteger(int64_t TmpInt, StringRef &ErrMsg) {
      IntelExprState CurrState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_PLUS:
      case IES_MINUS:
      case IES_NOT:
      case IES_OR:
      case IES_XOR:
      case IES_AND:
      case IES_EQ:
      case IES_NE:
      case IES_LT:
      case IES_LE:
      case IES_GT:
      case IES_GE:
      case IES_LSHIFT:
      case IES_RSHIFT:
      case IES_DIVIDE:
      case IES_MOD:
      case IES_MULTIPLY:
      case IES_LPAREN:
      case IES_INIT:
      case IES_LBRAC:
        State = IES_INTEGER;
        if (PrevState == IES_REGISTER && CurrState == IES_MULTIPLY) {
          // Index Register - Register * Scale
          if (IndexReg) {
            ErrMsg = "BaseReg/IndexReg already set!";
            return true;
          }
          IndexReg = TmpReg;
          Scale = TmpInt;
          if (checkScale(Scale, ErrMsg))
            return true;
          // Get the scale and replace the 'Register * Scale' with '0'.
          IC.popOperator();
        } else {
          IC.pushOperand(IC_IMM, TmpInt);
        }
        break;
      }
      PrevState = CurrState;
      return false;
    }
882     void onStar() {
883       PrevState = State;
884       switch (State) {
885       default:
886         State = IES_ERROR;
887         break;
888       case IES_INTEGER:
889       case IES_REGISTER:
890       case IES_RPAREN:
891         State = IES_MULTIPLY;
892         IC.pushOperator(IC_MULTIPLY);
893         break;
894       }
895     }
896     void onDivide() {
897       PrevState = State;
898       switch (State) {
899       default:
900         State = IES_ERROR;
901         break;
902       case IES_INTEGER:
903       case IES_RPAREN:
904         State = IES_DIVIDE;
905         IC.pushOperator(IC_DIVIDE);
906         break;
907       }
908     }
909     void onMod() {
910       PrevState = State;
911       switch (State) {
912       default:
913         State = IES_ERROR;
914         break;
915       case IES_INTEGER:
916       case IES_RPAREN:
917         State = IES_MOD;
918         IC.pushOperator(IC_MOD);
919         break;
920       }
921     }
    // '[' token.  Only a single bracketed component is allowed (returns true
    // if one is already open).  After a value, '[' acts like '+' (implicit
    // addition of the bracketed part); at the start or after a cast it just
    // opens the memory reference.
    bool onLBrac() {
      if (BracCount)
        return true;
      PrevState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_RBRAC:
      case IES_INTEGER:
      case IES_RPAREN:
        State = IES_PLUS;
        IC.pushOperator(IC_PLUS);
        // A bracketed subscript resets the aggregate type to one element.
        CurType.Length = 1;
        CurType.Size = CurType.ElementSize;
        break;
      case IES_INIT:
      case IES_CAST:
        assert(!BracCount && "BracCount should be zero on parsing's start");
        State = IES_LBRAC;
        break;
      }
      MemExpr = true;
      BracCount++;
      return false;
    }
    // ']' token.  Returns true (error) on an unbalanced bracket; note the
    // post-decrement updates BracCount even on that error path.  A register
    // pending at the close is folded into base/index, as in onPlus.
    bool onRBrac() {
      IntelExprState CurrState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_INTEGER:
      case IES_OFFSET:
      case IES_REGISTER:
      case IES_RPAREN:
        if (BracCount-- != 1)
          return true;
        State = IES_RBRAC;
        if (CurrState == IES_REGISTER && PrevState != IES_MULTIPLY) {
          // If we already have a BaseReg, then assume this is the IndexReg with
          // no explicit scale.
          if (!BaseReg) {
            BaseReg = TmpReg;
          } else {
            assert (!IndexReg && "BaseReg/IndexReg already set!");
            IndexReg = TmpReg;
            Scale = 0;
          }
        }
        break;
      }
      PrevState = CurrState;
      return false;
    }
    // Handle an opening '('. Valid after any unary/binary operator or at
    // the start of a (sub)expression; after a completed value it is an
    // error (no implicit multiplication).
    void onLParen() {
      IntelExprState CurrState = State;
      switch (State) {
      default:
        State = IES_ERROR;
        break;
      case IES_PLUS:
      case IES_MINUS:
      case IES_NOT:
      case IES_OR:
      case IES_XOR:
      case IES_AND:
      case IES_EQ:
      case IES_NE:
      case IES_LT:
      case IES_LE:
      case IES_GT:
      case IES_GE:
      case IES_LSHIFT:
      case IES_RSHIFT:
      case IES_MULTIPLY:
      case IES_DIVIDE:
      case IES_MOD:
      case IES_LPAREN:
      case IES_INIT:
      case IES_LBRAC:
        State = IES_LPAREN;
        IC.pushOperator(IC_LPAREN);
        break;
      }
      PrevState = CurrState;
    }
1009     void onRParen() {
1010       PrevState = State;
1011       switch (State) {
1012       default:
1013         State = IES_ERROR;
1014         break;
1015       case IES_INTEGER:
1016       case IES_OFFSET:
1017       case IES_REGISTER:
1018       case IES_RBRAC:
1019       case IES_RPAREN:
1020         State = IES_RPAREN;
1021         IC.pushOperator(IC_RPAREN);
1022         break;
1023       }
1024     }
    // Handle the OFFSET operator applied to identifier ID (whose resolved
    // expression is Val). Only valid at the start of an expression or right
    // after '+' / '['. Returns true and sets ErrMsg on error.
    bool onOffset(const MCExpr *Val, SMLoc OffsetLoc, StringRef ID,
                  const InlineAsmIdentifierInfo &IDInfo,
                  bool ParsingMSInlineAsm, StringRef &ErrMsg) {
      PrevState = State;
      switch (State) {
      default:
        ErrMsg = "unexpected offset operator expression";
        return true;
      case IES_PLUS:
      case IES_INIT:
      case IES_LBRAC:
        if (setSymRef(Val, ID, ErrMsg))
          return true;
        OffsetOperator = true;
        OffsetOperatorLoc = OffsetLoc;
        State = IES_OFFSET;
        // As we cannot yet resolve the actual value (offset), we retain
        // the requested semantics by pushing a '0' to the operands stack
        IC.pushOperand(IC_IMM);
        if (ParsingMSInlineAsm) {
          // Remember the frontend identifier info for MS inline asm rewriting.
          Info = IDInfo;
        }
        break;
      }
      return false;
    }
1051     void onCast(AsmTypeInfo Info) {
1052       PrevState = State;
1053       switch (State) {
1054       default:
1055         State = IES_ERROR;
1056         break;
1057       case IES_LPAREN:
1058         setTypeInfo(Info);
1059         State = IES_CAST;
1060         break;
1061       }
1062     }
1063     void setTypeInfo(AsmTypeInfo Type) { CurType = Type; }
1064   };
1065 
1066   bool Error(SMLoc L, const Twine &Msg, SMRange Range = None,
1067              bool MatchingInlineAsm = false) {
1068     MCAsmParser &Parser = getParser();
1069     if (MatchingInlineAsm) {
1070       if (!getLexer().isAtStartOfStatement())
1071         Parser.eatToEndOfStatement();
1072       return false;
1073     }
1074     return Parser.Error(L, Msg, Range);
1075   }
1076 
  // Register-name matching and register parsing helpers.
  bool MatchRegisterByName(unsigned &RegNo, StringRef RegName, SMLoc StartLoc,
                           SMLoc EndLoc);
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc,
                     bool RestoreOnFailure);

  // Implicit (R|E)SI/(R|E)DI memory operands for string instructions.
  std::unique_ptr<X86Operand> DefaultMemSIOperand(SMLoc Loc);
  std::unique_ptr<X86Operand> DefaultMemDIOperand(SMLoc Loc);
  bool IsSIReg(unsigned Reg);
  unsigned GetSIDIForRegClass(unsigned RegClassID, unsigned Reg, bool IsSIReg);
  void
  AddDefaultSrcDestOperands(OperandVector &Operands,
                            std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
                            std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst);
  bool VerifyAndAdjustOperands(OperandVector &OrigOperands,
                               OperandVector &FinalOperands);

  // Operand parsing for the AT&T and Intel dialects (and MASM/MS inline asm
  // specific operators).
  bool ParseOperand(OperandVector &Operands);
  bool ParseATTOperand(OperandVector &Operands);
  bool ParseIntelOperand(OperandVector &Operands);
  bool ParseIntelOffsetOperator(const MCExpr *&Val, StringRef &ID,
                                InlineAsmIdentifierInfo &Info, SMLoc &End);
  bool ParseIntelDotOperator(IntelExprStateMachine &SM, SMLoc &End);
  unsigned IdentifyIntelInlineAsmOperator(StringRef Name);
  unsigned ParseIntelInlineAsmOperator(unsigned OpKind);
  unsigned IdentifyMasmOperator(StringRef Name);
  bool ParseMasmOperator(unsigned OpKind, int64_t &Val);
  bool ParseRoundingModeOp(SMLoc Start, OperandVector &Operands);
  bool ParseIntelNamedOperator(StringRef Name, IntelExprStateMachine &SM,
                               bool &ParseError, SMLoc &End);
  bool ParseMasmNamedOperator(StringRef Name, IntelExprStateMachine &SM,
                              bool &ParseError, SMLoc &End);
  void RewriteIntelExpression(IntelExprStateMachine &SM, SMLoc Start,
                              SMLoc End);
  bool ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End);
  bool ParseIntelInlineAsmIdentifier(const MCExpr *&Val, StringRef &Identifier,
                                     InlineAsmIdentifierInfo &Info,
                                     bool IsUnevaluatedOperand, SMLoc &End,
                                     bool IsParsingOffsetOperator = false);

  bool ParseMemOperand(unsigned SegReg, const MCExpr *Disp, SMLoc StartLoc,
                       SMLoc EndLoc, OperandVector &Operands);

  X86::CondCode ParseConditionCode(StringRef CCode);

  bool ParseIntelMemoryOperandSize(unsigned &Size);
  bool CreateMemForMSInlineAsm(unsigned SegReg, const MCExpr *Disp,
                               unsigned BaseReg, unsigned IndexReg,
                               unsigned Scale, SMLoc Start, SMLoc End,
                               unsigned Size, StringRef Identifier,
                               const InlineAsmIdentifierInfo &Info,
                               OperandVector &Operands);
1127 
  /// Target-specific assembler directives.
  bool parseDirectiveArch();
  bool parseDirectiveNops(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool ParseDirectiveCode(StringRef IDVal, SMLoc L);

  /// CodeView FPO data directives.
  bool parseDirectiveFPOProc(SMLoc L);
  bool parseDirectiveFPOSetFrame(SMLoc L);
  bool parseDirectiveFPOPushReg(SMLoc L);
  bool parseDirectiveFPOStackAlloc(SMLoc L);
  bool parseDirectiveFPOStackAlign(SMLoc L);
  bool parseDirectiveFPOEndPrologue(SMLoc L);
  bool parseDirectiveFPOEndProc(SMLoc L);

  /// SEH directives.
  bool parseSEHRegisterNumber(unsigned RegClassID, unsigned &RegNo);
  bool parseDirectiveSEHPushReg(SMLoc);
  bool parseDirectiveSEHSetFrame(SMLoc);
  bool parseDirectiveSEHSaveReg(SMLoc);
  bool parseDirectiveSEHSaveXMM(SMLoc);
  bool parseDirectiveSEHPushFrame(SMLoc);

  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  /// Post-parse validation and rewriting of matched instructions.
  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops);

  // Load Value Injection (LVI) Mitigations for machine code
  void emitWarningForSpecialLVIInstruction(SMLoc Loc);
  void applyLVICFIMitigation(MCInst &Inst, MCStreamer &Out);
  void applyLVILoadHardeningMitigation(MCInst &Inst, MCStreamer &Out);

  /// Wrapper around MCStreamer::emitInstruction(). Possibly adds
  /// instrumentation around Inst.
  void emitInstruction(MCInst &Inst, OperandVector &Operands, MCStreamer &Out);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;

  void MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op, OperandVector &Operands,
                         MCStreamer &Out, bool MatchingInlineAsm);

  bool ErrorMissingFeature(SMLoc IDLoc, const FeatureBitset &MissingFeatures,
                           bool MatchingInlineAsm);

  /// Dialect-specific match-and-emit implementations.
  bool MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
                                  OperandVector &Operands, MCStreamer &Out,
                                  uint64_t &ErrorInfo,
                                  bool MatchingInlineAsm);

  bool MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
                                    OperandVector &Operands, MCStreamer &Out,
                                    uint64_t &ErrorInfo,
                                    bool MatchingInlineAsm);

  bool OmitRegisterFromClobberLists(unsigned RegNo) override;

  /// Parses AVX512 specific operand primitives: masked registers ({%k<NUM>}, {z})
  /// and memory broadcasting ({1to<NUM>}) primitives, updating Operands vector if required.
  /// return false if no parsing errors occurred, true otherwise.
  bool HandleAVX512Operand(OperandVector &Operands);

  bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
1193 
  // Code-mode predicates backed by the subtarget Mode16/32/64 feature bits.
  // SwitchMode() below asserts that exactly one of these bits is set.
  bool is64BitMode() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().getFeatureBits()[X86::Mode64Bit];
  }
  bool is32BitMode() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().getFeatureBits()[X86::Mode32Bit];
  }
  bool is16BitMode() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().getFeatureBits()[X86::Mode16Bit];
  }
  // Switch the subtarget to the given code mode (one of X86::Mode16Bit,
  // Mode32Bit, Mode64Bit) and recompute the available-features mask.
  void SwitchMode(unsigned mode) {
    MCSubtargetInfo &STI = copySTI();
    FeatureBitset AllModes({X86::Mode64Bit, X86::Mode32Bit, X86::Mode16Bit});
    FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
    // Flipping 'mode' in the currently-set mode bits yields exactly the set
    // of bits that differ from the desired state, so a single ToggleFeature
    // clears the old mode and sets the new one.
    FeatureBitset FB = ComputeAvailableFeatures(
      STI.ToggleFeature(OldMode.flip(mode)));
    setAvailableFeatures(FB);

    // Exactly the requested mode bit must now be set.
    assert(FeatureBitset({mode}) == (STI.getFeatureBits() & AllModes));
  }
1216 
1217   unsigned getPointerWidth() {
1218     if (is16BitMode()) return 16;
1219     if (is32BitMode()) return 32;
1220     if (is64BitMode()) return 64;
1221     llvm_unreachable("invalid mode");
1222   }
1223 
1224   bool isParsingIntelSyntax() {
1225     return getParser().getAssemblerDialect();
1226   }
1227 
1228   /// @name Auto-generated Matcher Functions
1229   /// {
1230 
1231 #define GET_ASSEMBLER_HEADER
1232 #include "X86GenAsmMatcher.inc"
1233 
1234   /// }
1235 
1236 public:
1237   enum X86MatchResultTy {
1238     Match_Unsupported = FIRST_TARGET_MATCH_RESULT_TY,
1239 #define GET_OPERAND_DIAGNOSTIC_TYPES
1240 #include "X86GenAsmMatcher.inc"
1241   };
1242 
  // Construct the X86 asm parser: registers the ".word" -> ".2byte" alias
  // and computes the initial available-feature set from the subtarget.
  X86AsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser,
               const MCInstrInfo &mii, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, sti, mii),  InstInfo(nullptr),
        Code16GCC(false) {

    Parser.addAliasForDirective(".word", ".2byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }
  // MCTargetAsmParser interface overrides.
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;

  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  bool ParseDirective(AsmToken DirectiveID) override;
1264 };
1265 } // end anonymous namespace
1266 
1267 /// @name Auto-generated Match Functions
1268 /// {
1269 
1270 static unsigned MatchRegisterName(StringRef Name);
1271 
1272 /// }
1273 
// Validate the base register, index register and scale of a parsed memory
// operand against the X86 addressing-mode rules for the current bitness.
// Returns true and sets ErrMsg if the combination cannot be encoded.
static bool CheckBaseRegAndIndexRegAndScale(unsigned BaseReg, unsigned IndexReg,
                                            unsigned Scale, bool Is64BitMode,
                                            StringRef &ErrMsg) {
  // If we have both a base register and an index register make sure they are
  // both 64-bit or 32-bit registers.
  // To support VSIB, IndexReg can be 128-bit or 256-bit registers.

  if (BaseReg != 0 &&
      !(BaseReg == X86::RIP || BaseReg == X86::EIP ||
        X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) ||
        X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg))) {
    ErrMsg = "invalid base+index expression";
    return true;
  }

  // EIZ/RIZ pseudo index registers are accepted; vector register classes
  // (128/256/512-bit) are accepted to support VSIB addressing.
  if (IndexReg != 0 &&
      !(IndexReg == X86::EIZ || IndexReg == X86::RIZ ||
        X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg))) {
    ErrMsg = "invalid base+index expression";
    return true;
  }

  // A RIP/EIP base cannot be combined with an index register, and
  // (E|R)IP / (E|R)SP can never serve as an index.
  if (((BaseReg == X86::RIP || BaseReg == X86::EIP) && IndexReg != 0) ||
      IndexReg == X86::EIP || IndexReg == X86::RIP ||
      IndexReg == X86::ESP || IndexReg == X86::RSP) {
    ErrMsg = "invalid base+index expression";
    return true;
  }

  // Check for use of invalid 16-bit registers. Only BX/BP/SI/DI are allowed,
  // and then only in non-64-bit modes.
  if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
      (Is64BitMode || (BaseReg != X86::BX && BaseReg != X86::BP &&
                       BaseReg != X86::SI && BaseReg != X86::DI))) {
    ErrMsg = "invalid 16-bit base register";
    return true;
  }

  if (BaseReg == 0 &&
      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg)) {
    ErrMsg = "16-bit memory operand may not include only index register";
    return true;
  }

  // Base and index must agree in width; EIZ/RIZ pair with 64/32-bit bases
  // respectively.
  if (BaseReg != 0 && IndexReg != 0) {
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
         IndexReg == X86::EIZ)) {
      ErrMsg = "base register is 64-bit, but index register is not";
      return true;
    }
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg) &&
        (X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg) ||
         X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg) ||
         IndexReg == X86::RIZ)) {
      ErrMsg = "base register is 32-bit, but index register is not";
      return true;
    }
    if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg)) {
      if (X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg) ||
          X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg)) {
        ErrMsg = "base register is 16-bit, but index register is not";
        return true;
      }
      // Only the classic 16-bit pairings (BX|BP) + (SI|DI) are encodable.
      if ((BaseReg != X86::BX && BaseReg != X86::BP) ||
          (IndexReg != X86::SI && IndexReg != X86::DI)) {
        ErrMsg = "invalid 16-bit base/index register combination";
        return true;
      }
    }
  }

  // RIP/EIP-relative addressing is only supported in 64-bit mode.
  if (!Is64BitMode && BaseReg != 0 &&
      (BaseReg == X86::RIP || BaseReg == X86::EIP)) {
    ErrMsg = "IP-relative addressing requires 64-bit mode";
    return true;
  }

  return checkScale(Scale, ErrMsg);
}
1362 
1363 bool X86AsmParser::MatchRegisterByName(unsigned &RegNo, StringRef RegName,
1364                                        SMLoc StartLoc, SMLoc EndLoc) {
1365   // If we encounter a %, ignore it. This code handles registers with and
1366   // without the prefix, unprefixed registers can occur in cfi directives.
1367   RegName.consume_front("%");
1368 
1369   RegNo = MatchRegisterName(RegName);
1370 
1371   // If the match failed, try the register name as lowercase.
1372   if (RegNo == 0)
1373     RegNo = MatchRegisterName(RegName.lower());
1374 
1375   // The "flags" and "mxcsr" registers cannot be referenced directly.
1376   // Treat it as an identifier instead.
1377   if (isParsingMSInlineAsm() && isParsingIntelSyntax() &&
1378       (RegNo == X86::EFLAGS || RegNo == X86::MXCSR))
1379     RegNo = 0;
1380 
1381   if (!is64BitMode()) {
1382     // FIXME: This should be done using Requires<Not64BitMode> and
1383     // Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also
1384     // checked.
1385     if (RegNo == X86::RIZ || RegNo == X86::RIP ||
1386         X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo) ||
1387         X86II::isX86_64NonExtLowByteReg(RegNo) ||
1388         X86II::isX86_64ExtendedReg(RegNo)) {
1389       return Error(StartLoc,
1390                    "register %" + RegName + " is only available in 64-bit mode",
1391                    SMRange(StartLoc, EndLoc));
1392     }
1393   }
1394 
1395   // If this is "db[0-15]", match it as an alias
1396   // for dr[0-15].
1397   if (RegNo == 0 && RegName.startswith("db")) {
1398     if (RegName.size() == 3) {
1399       switch (RegName[2]) {
1400       case '0':
1401         RegNo = X86::DR0;
1402         break;
1403       case '1':
1404         RegNo = X86::DR1;
1405         break;
1406       case '2':
1407         RegNo = X86::DR2;
1408         break;
1409       case '3':
1410         RegNo = X86::DR3;
1411         break;
1412       case '4':
1413         RegNo = X86::DR4;
1414         break;
1415       case '5':
1416         RegNo = X86::DR5;
1417         break;
1418       case '6':
1419         RegNo = X86::DR6;
1420         break;
1421       case '7':
1422         RegNo = X86::DR7;
1423         break;
1424       case '8':
1425         RegNo = X86::DR8;
1426         break;
1427       case '9':
1428         RegNo = X86::DR9;
1429         break;
1430       }
1431     } else if (RegName.size() == 4 && RegName[2] == '1') {
1432       switch (RegName[3]) {
1433       case '0':
1434         RegNo = X86::DR10;
1435         break;
1436       case '1':
1437         RegNo = X86::DR11;
1438         break;
1439       case '2':
1440         RegNo = X86::DR12;
1441         break;
1442       case '3':
1443         RegNo = X86::DR13;
1444         break;
1445       case '4':
1446         RegNo = X86::DR14;
1447         break;
1448       case '5':
1449         RegNo = X86::DR15;
1450         break;
1451       }
1452     }
1453   }
1454 
1455   if (RegNo == 0) {
1456     if (isParsingIntelSyntax())
1457       return true;
1458     return Error(StartLoc, "invalid register name", SMRange(StartLoc, EndLoc));
1459   }
1460   return false;
1461 }
1462 
// Parse a register reference starting at the current token. On success,
// RegNo is set and the register's tokens are consumed. On failure, if
// RestoreOnFailure is set, all consumed tokens are un-lexed so the lexer
// state is unchanged.
bool X86AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                 SMLoc &EndLoc, bool RestoreOnFailure) {
  MCAsmParser &Parser = getParser();
  MCAsmLexer &Lexer = getLexer();
  RegNo = 0;

  // Tokens consumed so far, so OnFailure can rewind them (in reverse order).
  SmallVector<AsmToken, 5> Tokens;
  auto OnFailure = [RestoreOnFailure, &Lexer, &Tokens]() {
    if (RestoreOnFailure) {
      while (!Tokens.empty()) {
        Lexer.UnLex(Tokens.pop_back_val());
      }
    }
  };

  const AsmToken &PercentTok = Parser.getTok();
  StartLoc = PercentTok.getLoc();

  // If we encounter a %, ignore it. This code handles registers with and
  // without the prefix, unprefixed registers can occur in cfi directives.
  if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent)) {
    Tokens.push_back(PercentTok);
    Parser.Lex(); // Eat percent token.
  }

  const AsmToken &Tok = Parser.getTok();
  EndLoc = Tok.getEndLoc();

  if (Tok.isNot(AsmToken::Identifier)) {
    OnFailure();
    // Intel syntax: fail silently so the caller can re-parse the token.
    if (isParsingIntelSyntax()) return true;
    return Error(StartLoc, "invalid register name",
                 SMRange(StartLoc, EndLoc));
  }

  if (MatchRegisterByName(RegNo, Tok.getString(), StartLoc, EndLoc)) {
    OnFailure();
    return true;
  }

  // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
  if (RegNo == X86::ST0) {
    Tokens.push_back(Tok);
    Parser.Lex(); // Eat 'st'

    // Check to see if we have '(4)' after %st.
    if (Lexer.isNot(AsmToken::LParen))
      return false;
    // Lex the paren.
    Tokens.push_back(Parser.getTok());
    Parser.Lex();

    const AsmToken &IntTok = Parser.getTok();
    if (IntTok.isNot(AsmToken::Integer)) {
      OnFailure();
      return Error(IntTok.getLoc(), "expected stack index");
    }
    switch (IntTok.getIntVal()) {
    case 0: RegNo = X86::ST0; break;
    case 1: RegNo = X86::ST1; break;
    case 2: RegNo = X86::ST2; break;
    case 3: RegNo = X86::ST3; break;
    case 4: RegNo = X86::ST4; break;
    case 5: RegNo = X86::ST5; break;
    case 6: RegNo = X86::ST6; break;
    case 7: RegNo = X86::ST7; break;
    default:
      OnFailure();
      return Error(IntTok.getLoc(), "invalid stack index");
    }

    // Lex IntTok
    Tokens.push_back(IntTok);
    Parser.Lex();
    if (Lexer.isNot(AsmToken::RParen)) {
      OnFailure();
      return Error(Parser.getTok().getLoc(), "expected ')'");
    }

    EndLoc = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat ')'
    return false;
  }

  EndLoc = Parser.getTok().getEndLoc();

  if (RegNo == 0) {
    OnFailure();
    if (isParsingIntelSyntax()) return true;
    return Error(StartLoc, "invalid register name",
                 SMRange(StartLoc, EndLoc));
  }

  Parser.Lex(); // Eat identifier token.
  return false;
}
1559 
// Public ParseRegister entry point: parses without rewinding the lexer on
// failure (diagnostics are emitted as usual).
bool X86AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                 SMLoc &EndLoc) {
  return ParseRegister(RegNo, StartLoc, EndLoc, /*RestoreOnFailure=*/false);
}
1564 
1565 OperandMatchResultTy X86AsmParser::tryParseRegister(unsigned &RegNo,
1566                                                     SMLoc &StartLoc,
1567                                                     SMLoc &EndLoc) {
1568   bool Result =
1569       ParseRegister(RegNo, StartLoc, EndLoc, /*RestoreOnFailure=*/true);
1570   bool PendingErrors = getParser().hasPendingError();
1571   getParser().clearPendingErrors();
1572   if (PendingErrors)
1573     return MatchOperand_ParseFail;
1574   if (Result)
1575     return MatchOperand_NoMatch;
1576   return MatchOperand_Success;
1577 }
1578 
1579 std::unique_ptr<X86Operand> X86AsmParser::DefaultMemSIOperand(SMLoc Loc) {
1580   bool Parse32 = is32BitMode() || Code16GCC;
1581   unsigned Basereg = is64BitMode() ? X86::RSI : (Parse32 ? X86::ESI : X86::SI);
1582   const MCExpr *Disp = MCConstantExpr::create(0, getContext());
1583   return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
1584                                /*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
1585                                Loc, Loc, 0);
1586 }
1587 
1588 std::unique_ptr<X86Operand> X86AsmParser::DefaultMemDIOperand(SMLoc Loc) {
1589   bool Parse32 = is32BitMode() || Code16GCC;
1590   unsigned Basereg = is64BitMode() ? X86::RDI : (Parse32 ? X86::EDI : X86::DI);
1591   const MCExpr *Disp = MCConstantExpr::create(0, getContext());
1592   return X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
1593                                /*BaseReg=*/Basereg, /*IndexReg=*/0, /*Scale=*/1,
1594                                Loc, Loc, 0);
1595 }
1596 
1597 bool X86AsmParser::IsSIReg(unsigned Reg) {
1598   switch (Reg) {
1599   default: llvm_unreachable("Only (R|E)SI and (R|E)DI are expected!");
1600   case X86::RSI:
1601   case X86::ESI:
1602   case X86::SI:
1603     return true;
1604   case X86::RDI:
1605   case X86::EDI:
1606   case X86::DI:
1607     return false;
1608   }
1609 }
1610 
1611 unsigned X86AsmParser::GetSIDIForRegClass(unsigned RegClassID, unsigned Reg,
1612                                           bool IsSIReg) {
1613   switch (RegClassID) {
1614   default: llvm_unreachable("Unexpected register class");
1615   case X86::GR64RegClassID:
1616     return IsSIReg ? X86::RSI : X86::RDI;
1617   case X86::GR32RegClassID:
1618     return IsSIReg ? X86::ESI : X86::EDI;
1619   case X86::GR16RegClassID:
1620     return IsSIReg ? X86::SI : X86::DI;
1621   }
1622 }
1623 
1624 void X86AsmParser::AddDefaultSrcDestOperands(
1625     OperandVector& Operands, std::unique_ptr<llvm::MCParsedAsmOperand> &&Src,
1626     std::unique_ptr<llvm::MCParsedAsmOperand> &&Dst) {
1627   if (isParsingIntelSyntax()) {
1628     Operands.push_back(std::move(Dst));
1629     Operands.push_back(std::move(Src));
1630   }
1631   else {
1632     Operands.push_back(std::move(Src));
1633     Operands.push_back(std::move(Dst));
1634   }
1635 }
1636 
// Reconcile the operands the user wrote (OrigOperands, which also holds the
// mnemonic at index 0) with the hardware-implied operands (FinalOperands,
// e.g. from DefaultMemSI/DIOperand): verify register classes agree, carry
// the user's size/segment into the implied memory operands, warn when the
// written address will be ignored, then replace the originals with the
// adjusted implied operands. Returns true on error.
bool X86AsmParser::VerifyAndAdjustOperands(OperandVector &OrigOperands,
                                           OperandVector &FinalOperands) {

  if (OrigOperands.size() > 1) {
    // Check if sizes match, OrigOperands also contains the instruction name
    assert(OrigOperands.size() == FinalOperands.size() + 1 &&
           "Operand size mismatch");

    SmallVector<std::pair<SMLoc, std::string>, 2> Warnings;
    // Verify types match
    int RegClassID = -1;
    for (unsigned int i = 0; i < FinalOperands.size(); ++i) {
      X86Operand &OrigOp = static_cast<X86Operand &>(*OrigOperands[i + 1]);
      X86Operand &FinalOp = static_cast<X86Operand &>(*FinalOperands[i]);

      if (FinalOp.isReg() &&
          (!OrigOp.isReg() || FinalOp.getReg() != OrigOp.getReg()))
        // Return false and let a normal complaint about bogus operands happen
        return false;

      if (FinalOp.isMem()) {

        if (!OrigOp.isMem())
          // Return false and let a normal complaint about bogus operands happen
          return false;

        unsigned OrigReg = OrigOp.Mem.BaseReg;
        unsigned FinalReg = FinalOp.Mem.BaseReg;

        // If we've already encountered a register class, make sure all
        // register bases are of the same register class
        if (RegClassID != -1 &&
            !X86MCRegisterClasses[RegClassID].contains(OrigReg)) {
          return Error(OrigOp.getStartLoc(),
                       "mismatching source and destination index registers");
        }

        if (X86MCRegisterClasses[X86::GR64RegClassID].contains(OrigReg))
          RegClassID = X86::GR64RegClassID;
        else if (X86MCRegisterClasses[X86::GR32RegClassID].contains(OrigReg))
          RegClassID = X86::GR32RegClassID;
        else if (X86MCRegisterClasses[X86::GR16RegClassID].contains(OrigReg))
          RegClassID = X86::GR16RegClassID;
        else
          // Unexpected register class type
          // Return false and let a normal complaint about bogus operands happen
          return false;

        bool IsSI = IsSIReg(FinalReg);
        FinalReg = GetSIDIForRegClass(RegClassID, FinalReg, IsSI);

        if (FinalReg != OrigReg) {
          std::string RegName = IsSI ? "ES:(R|E)SI" : "ES:(R|E)DI";
          Warnings.push_back(std::make_pair(
              OrigOp.getStartLoc(),
              "memory operand is only for determining the size, " + RegName +
                  " will be used for the location"));
        }

        // Keep the user's size/segment but force the implied base register.
        FinalOp.Mem.Size = OrigOp.Mem.Size;
        FinalOp.Mem.SegReg = OrigOp.Mem.SegReg;
        FinalOp.Mem.BaseReg = FinalReg;
      }
    }

    // Produce warnings only if all the operands passed the adjustment - prevent
    // legal cases like "movsd (%rax), %xmm0" mistakenly produce warnings
    for (auto &WarningMsg : Warnings) {
      Warning(WarningMsg.first, WarningMsg.second);
    }

    // Remove old operands
    for (unsigned int i = 0; i < FinalOperands.size(); ++i)
      OrigOperands.pop_back();
  }
  // OrigOperands.append(FinalOperands.begin(), FinalOperands.end());
  for (unsigned int i = 0; i < FinalOperands.size(); ++i)
    OrigOperands.push_back(std::move(FinalOperands[i]));

  return false;
}
1718 
1719 bool X86AsmParser::ParseOperand(OperandVector &Operands) {
1720   if (isParsingIntelSyntax())
1721     return ParseIntelOperand(Operands);
1722 
1723   return ParseATTOperand(Operands);
1724 }
1725 
// Build an X86Operand for a memory reference parsed from MS inline asm and
// append it to Operands. Identifier/Info describe the frontend symbol the
// reference is based on. Always returns false (no error).
bool X86AsmParser::CreateMemForMSInlineAsm(
    unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg,
    unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier,
    const InlineAsmIdentifierInfo &Info, OperandVector &Operands) {
  // If we found a decl other than a VarDecl, then assume it is a FuncDecl or
  // some other label reference.
  if (Info.isKind(InlineAsmIdentifierInfo::IK_Label)) {
    // Insert an explicit size if the user didn't have one.
    if (!Size) {
      Size = getPointerWidth();
      InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
                                          /*Len=*/0, Size);
    }
    // Create an absolute memory reference in order to match against
    // instructions taking a PC relative operand.
    Operands.push_back(X86Operand::CreateMem(getPointerWidth(), Disp, Start,
                                             End, Size, Identifier,
                                             Info.Label.Decl));
    return false;
  }
  // We either have a direct symbol reference, or an offset from a symbol.  The
  // parser always puts the symbol on the LHS, so look there for size
  // calculation purposes.
  unsigned FrontendSize = 0;
  void *Decl = nullptr;
  bool IsGlobalLV = false;
  if (Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
    // Size is in terms of bits in this context.
    FrontendSize = Info.Var.Type * 8;
    Decl = Info.Var.Decl;
    IsGlobalLV = Info.Var.IsGlobalLV;
  }
  // It is common for MS inline asm to use a global variable together with
  // one or two registers in a memory expression; such a reference is not
  // addressable via rip/eip.
  if (IsGlobalLV && (BaseReg || IndexReg)) {
    Operands.push_back(
        X86Operand::CreateMem(getPointerWidth(), Disp, Start, End));
    return false;
  }
  // Otherwise, we set the base register to a non-zero value
  // if we don't know the actual value at this time.  This is necessary to
  // get the matching correct in some cases.
  BaseReg = BaseReg ? BaseReg : 1;
  Operands.push_back(X86Operand::CreateMem(
      getPointerWidth(), SegReg, Disp, BaseReg, IndexReg, Scale, Start, End,
      Size,
      /*DefaultBaseReg=*/X86::RIP, Identifier, Decl, FrontendSize));
  return false;
}
1775 
1776 // Some binary bitwise operators have a named synonymous
1777 // Query a candidate string for being such a named operator
1778 // and if so - invoke the appropriate handler
1779 bool X86AsmParser::ParseIntelNamedOperator(StringRef Name,
1780                                            IntelExprStateMachine &SM,
1781                                            bool &ParseError, SMLoc &End) {
1782   // A named operator should be either lower or upper case, but not a mix...
1783   // except in MASM, which uses full case-insensitivity.
1784   if (Name.compare(Name.lower()) && Name.compare(Name.upper()) &&
1785       !getParser().isParsingMasm())
1786     return false;
1787   if (Name.equals_insensitive("not")) {
1788     SM.onNot();
1789   } else if (Name.equals_insensitive("or")) {
1790     SM.onOr();
1791   } else if (Name.equals_insensitive("shl")) {
1792     SM.onLShift();
1793   } else if (Name.equals_insensitive("shr")) {
1794     SM.onRShift();
1795   } else if (Name.equals_insensitive("xor")) {
1796     SM.onXor();
1797   } else if (Name.equals_insensitive("and")) {
1798     SM.onAnd();
1799   } else if (Name.equals_insensitive("mod")) {
1800     SM.onMod();
1801   } else if (Name.equals_insensitive("offset")) {
1802     SMLoc OffsetLoc = getTok().getLoc();
1803     const MCExpr *Val = nullptr;
1804     StringRef ID;
1805     InlineAsmIdentifierInfo Info;
1806     ParseError = ParseIntelOffsetOperator(Val, ID, Info, End);
1807     if (ParseError)
1808       return true;
1809     StringRef ErrMsg;
1810     ParseError =
1811         SM.onOffset(Val, OffsetLoc, ID, Info, isParsingMSInlineAsm(), ErrMsg);
1812     if (ParseError)
1813       return Error(SMLoc::getFromPointer(Name.data()), ErrMsg);
1814   } else {
1815     return false;
1816   }
1817   if (!Name.equals_insensitive("offset"))
1818     End = consumeToken();
1819   return true;
1820 }
1821 bool X86AsmParser::ParseMasmNamedOperator(StringRef Name,
1822                                           IntelExprStateMachine &SM,
1823                                           bool &ParseError, SMLoc &End) {
1824   if (Name.equals_insensitive("eq")) {
1825     SM.onEq();
1826   } else if (Name.equals_insensitive("ne")) {
1827     SM.onNE();
1828   } else if (Name.equals_insensitive("lt")) {
1829     SM.onLT();
1830   } else if (Name.equals_insensitive("le")) {
1831     SM.onLE();
1832   } else if (Name.equals_insensitive("gt")) {
1833     SM.onGT();
1834   } else if (Name.equals_insensitive("ge")) {
1835     SM.onGE();
1836   } else {
1837     return false;
1838   }
1839   End = consumeToken();
1840   return true;
1841 }
1842 
// Drive the Intel-syntax expression state machine (SM) over the token stream
// until end-of-statement or another valid end state is reached.  Handles
// registers, integers, identifiers, brackets/parens, named operators, and
// MASM-specific extensions (<TYPE> PTR casts, <register>.<field>, string
// constants, relational operators).  On success End points just past the
// consumed expression; returns true after emitting a diagnostic on failure.
bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
  MCAsmParser &Parser = getParser();
  StringRef ErrMsg;

  AsmToken::TokenKind PrevTK = AsmToken::Error;
  bool Done = false;
  while (!Done) {
    // Get a fresh reference on each loop iteration in case the previous
    // iteration moved the token storage during UnLex().
    const AsmToken &Tok = Parser.getTok();

    // UpdateLocLex: when true, the default epilogue below consumes the token
    // and updates End; cases that manage the lexer themselves clear it.
    bool UpdateLocLex = true;
    AsmToken::TokenKind TK = getLexer().getKind();

    switch (TK) {
    default:
      if ((Done = SM.isValidEndState()))
        break;
      return Error(Tok.getLoc(), "unknown token in expression");
    case AsmToken::Error:
      return Error(getLexer().getErrLoc(), getLexer().getErr());
      break;
    case AsmToken::EndOfStatement:
      Done = true;
      break;
    case AsmToken::Real:
      // DotOperator: [ebx].0
      UpdateLocLex = false;
      if (ParseIntelDotOperator(SM, End))
        return true;
      break;
    case AsmToken::Dot:
      if (!Parser.isParsingMasm()) {
        if ((Done = SM.isValidEndState()))
          break;
        return Error(Tok.getLoc(), "unknown token in expression");
      }
      // MASM allows spaces around the dot operator (e.g., "var . x")
      Lex();
      UpdateLocLex = false;
      if (ParseIntelDotOperator(SM, End))
        return true;
      break;
    case AsmToken::Dollar:
      if (!Parser.isParsingMasm()) {
        if ((Done = SM.isValidEndState()))
          break;
        return Error(Tok.getLoc(), "unknown token in expression");
      }
      LLVM_FALLTHROUGH;
    case AsmToken::String: {
      if (Parser.isParsingMasm()) {
        // MASM parsers handle strings in expressions as constants.
        SMLoc ValueLoc = Tok.getLoc();
        int64_t Res;
        const MCExpr *Val;
        if (Parser.parsePrimaryExpr(Val, End, nullptr))
          return true;
        UpdateLocLex = false;
        if (!Val->evaluateAsAbsolute(Res, getStreamer().getAssemblerPtr()))
          return Error(ValueLoc, "expected absolute value");
        if (SM.onInteger(Res, ErrMsg))
          return Error(ValueLoc, ErrMsg);
        break;
      }
      LLVM_FALLTHROUGH;
    }
    case AsmToken::At:
    case AsmToken::Identifier: {
      SMLoc IdentLoc = Tok.getLoc();
      StringRef Identifier = Tok.getString();
      UpdateLocLex = false;
      if (Parser.isParsingMasm()) {
        // (MASM only) An identifier lexed with an embedded dot is really
        // <LHS>.<RHS>; split it and push the pieces back onto the lexer so
        // the dot operator is handled on subsequent iterations.
        size_t DotOffset = Identifier.find_first_of('.');
        if (DotOffset != StringRef::npos) {
          consumeToken();
          StringRef LHS = Identifier.slice(0, DotOffset);
          StringRef Dot = Identifier.slice(DotOffset, DotOffset + 1);
          StringRef RHS = Identifier.slice(DotOffset + 1, StringRef::npos);
          if (!RHS.empty()) {
            getLexer().UnLex(AsmToken(AsmToken::Identifier, RHS));
          }
          getLexer().UnLex(AsmToken(AsmToken::Dot, Dot));
          if (!LHS.empty()) {
            getLexer().UnLex(AsmToken(AsmToken::Identifier, LHS));
          }
          break;
        }
      }
      // (MASM only) <TYPE> PTR operator
      if (Parser.isParsingMasm()) {
        const AsmToken &NextTok = getLexer().peekTok();
        if (NextTok.is(AsmToken::Identifier) &&
            NextTok.getIdentifier().equals_insensitive("ptr")) {
          AsmTypeInfo Info;
          if (Parser.lookUpType(Identifier, Info))
            return Error(Tok.getLoc(), "unknown type");
          SM.onCast(Info);
          // Eat type and PTR.
          consumeToken();
          End = consumeToken();
          break;
        }
      }
      // Register, or (MASM only) <register>.<field>
      unsigned Reg;
      if (Tok.is(AsmToken::Identifier)) {
        if (!ParseRegister(Reg, IdentLoc, End, /*RestoreOnFailure=*/true)) {
          if (SM.onRegister(Reg, ErrMsg))
            return Error(IdentLoc, ErrMsg);
          break;
        }
        if (Parser.isParsingMasm()) {
          const std::pair<StringRef, StringRef> IDField =
              Tok.getString().split('.');
          const StringRef ID = IDField.first, Field = IDField.second;
          SMLoc IDEndLoc = SMLoc::getFromPointer(ID.data() + ID.size());
          if (!Field.empty() &&
              !MatchRegisterByName(Reg, ID, IdentLoc, IDEndLoc)) {
            if (SM.onRegister(Reg, ErrMsg))
              return Error(IdentLoc, ErrMsg);

            // <register>.<field>: fold the field's byte offset into the
            // expression as "<register> + <offset>".
            AsmFieldInfo Info;
            SMLoc FieldStartLoc = SMLoc::getFromPointer(Field.data());
            if (Parser.lookUpField(Field, Info))
              return Error(FieldStartLoc, "unknown offset");
            else if (SM.onPlus(ErrMsg))
              return Error(getTok().getLoc(), ErrMsg);
            else if (SM.onInteger(Info.Offset, ErrMsg))
              return Error(IdentLoc, ErrMsg);
            SM.setTypeInfo(Info.Type);

            End = consumeToken();
            break;
          }
        }
      }
      // Operator synonymous ("not", "or" etc.)
      bool ParseError = false;
      if (ParseIntelNamedOperator(Identifier, SM, ParseError, End)) {
        if (ParseError)
          return true;
        break;
      }
      if (Parser.isParsingMasm() &&
          ParseMasmNamedOperator(Identifier, SM, ParseError, End)) {
        if (ParseError)
          return true;
        break;
      }
      // Symbol reference, when parsing assembly content
      InlineAsmIdentifierInfo Info;
      AsmFieldInfo FieldInfo;
      const MCExpr *Val;
      if (isParsingMSInlineAsm() || Parser.isParsingMasm()) {
        // MS Dot Operator expression
        if (Identifier.count('.') &&
            (PrevTK == AsmToken::RBrac || PrevTK == AsmToken::RParen)) {
          if (ParseIntelDotOperator(SM, End))
            return true;
          break;
        }
      }
      if (isParsingMSInlineAsm()) {
        // MS InlineAsm operators (TYPE/LENGTH/SIZE)
        if (unsigned OpKind = IdentifyIntelInlineAsmOperator(Identifier)) {
          if (int64_t Val = ParseIntelInlineAsmOperator(OpKind)) {
            if (SM.onInteger(Val, ErrMsg))
              return Error(IdentLoc, ErrMsg);
          } else {
            return true;
          }
          break;
        }
        // MS InlineAsm identifier
        // Call parseIdentifier() to combine @ with the identifier behind it.
        if (TK == AsmToken::At && Parser.parseIdentifier(Identifier))
          return Error(IdentLoc, "expected identifier");
        if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info, false, End))
          return true;
        else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
                                     true, ErrMsg))
          return Error(IdentLoc, ErrMsg);
        break;
      }
      if (Parser.isParsingMasm()) {
        if (unsigned OpKind = IdentifyMasmOperator(Identifier)) {
          int64_t Val;
          if (ParseMasmOperator(OpKind, Val))
            return true;
          if (SM.onInteger(Val, ErrMsg))
            return Error(IdentLoc, ErrMsg);
          break;
        }
        if (!getParser().lookUpType(Identifier, FieldInfo.Type)) {
          // Field offset immediate; <TYPE>.<field specification>
          Lex(); // eat type
          bool EndDot = parseOptionalToken(AsmToken::Dot);
          // Walk the chained field references (".a.b.c"), accumulating the
          // byte offset in FieldInfo.
          while (EndDot || (getTok().is(AsmToken::Identifier) &&
                            getTok().getString().startswith("."))) {
            getParser().parseIdentifier(Identifier);
            if (!EndDot)
              Identifier.consume_front(".");
            EndDot = Identifier.consume_back(".");
            if (getParser().lookUpField(FieldInfo.Type.Name, Identifier,
                                        FieldInfo)) {
              SMLoc IDEnd =
                  SMLoc::getFromPointer(Identifier.data() + Identifier.size());
              return Error(IdentLoc, "Unable to lookup field reference!",
                           SMRange(IdentLoc, IDEnd));
            }
            if (!EndDot)
              EndDot = parseOptionalToken(AsmToken::Dot);
          }
          if (SM.onInteger(FieldInfo.Offset, ErrMsg))
            return Error(IdentLoc, ErrMsg);
          break;
        }
      }
      if (getParser().parsePrimaryExpr(Val, End, &FieldInfo.Type)) {
        return Error(Tok.getLoc(), "Unexpected identifier!");
      } else if (SM.onIdentifierExpr(Val, Identifier, Info, FieldInfo.Type,
                                     false, ErrMsg)) {
        return Error(IdentLoc, ErrMsg);
      }
      break;
    }
    case AsmToken::Integer: {
      // Look for 'b' or 'f' following an Integer as a directional label
      SMLoc Loc = getTok().getLoc();
      int64_t IntVal = getTok().getIntVal();
      End = consumeToken();
      UpdateLocLex = false;
      if (getLexer().getKind() == AsmToken::Identifier) {
        StringRef IDVal = getTok().getString();
        if (IDVal == "f" || IDVal == "b") {
          MCSymbol *Sym =
              getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
          MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
          const MCExpr *Val =
              MCSymbolRefExpr::create(Sym, Variant, getContext());
          // Backward references must resolve to an already-defined label.
          if (IDVal == "b" && Sym->isUndefined())
            return Error(Loc, "invalid reference to undefined symbol");
          StringRef Identifier = Sym->getName();
          InlineAsmIdentifierInfo Info;
          AsmTypeInfo Type;
          if (SM.onIdentifierExpr(Val, Identifier, Info, Type,
                                  isParsingMSInlineAsm(), ErrMsg))
            return Error(Loc, ErrMsg);
          End = consumeToken();
        } else {
          if (SM.onInteger(IntVal, ErrMsg))
            return Error(Loc, ErrMsg);
        }
      } else {
        if (SM.onInteger(IntVal, ErrMsg))
          return Error(Loc, ErrMsg);
      }
      break;
    }
    case AsmToken::Plus:
      if (SM.onPlus(ErrMsg))
        return Error(getTok().getLoc(), ErrMsg);
      break;
    case AsmToken::Minus:
      if (SM.onMinus(ErrMsg))
        return Error(getTok().getLoc(), ErrMsg);
      break;
    case AsmToken::Tilde:   SM.onNot(); break;
    case AsmToken::Star:    SM.onStar(); break;
    case AsmToken::Slash:   SM.onDivide(); break;
    case AsmToken::Percent: SM.onMod(); break;
    case AsmToken::Pipe:    SM.onOr(); break;
    case AsmToken::Caret:   SM.onXor(); break;
    case AsmToken::Amp:     SM.onAnd(); break;
    case AsmToken::LessLess:
                            SM.onLShift(); break;
    case AsmToken::GreaterGreater:
                            SM.onRShift(); break;
    case AsmToken::LBrac:
      if (SM.onLBrac())
        return Error(Tok.getLoc(), "unexpected bracket encountered");
      break;
    case AsmToken::RBrac:
      if (SM.onRBrac())
        return Error(Tok.getLoc(), "unexpected bracket encountered");
      break;
    case AsmToken::LParen:  SM.onLParen(); break;
    case AsmToken::RParen:  SM.onRParen(); break;
    }
    if (SM.hadError())
      return Error(Tok.getLoc(), "unknown token in expression");

    if (!Done && UpdateLocLex)
      End = consumeToken();

    PrevTK = TK;
  }
  return false;
}
2143 
2144 void X86AsmParser::RewriteIntelExpression(IntelExprStateMachine &SM,
2145                                           SMLoc Start, SMLoc End) {
2146   SMLoc Loc = Start;
2147   unsigned ExprLen = End.getPointer() - Start.getPointer();
2148   // Skip everything before a symbol displacement (if we have one)
2149   if (SM.getSym() && !SM.isOffsetOperator()) {
2150     StringRef SymName = SM.getSymName();
2151     if (unsigned Len = SymName.data() - Start.getPointer())
2152       InstInfo->AsmRewrites->emplace_back(AOK_Skip, Start, Len);
2153     Loc = SMLoc::getFromPointer(SymName.data() + SymName.size());
2154     ExprLen = End.getPointer() - (SymName.data() + SymName.size());
2155     // If we have only a symbol than there's no need for complex rewrite,
2156     // simply skip everything after it
2157     if (!(SM.getBaseReg() || SM.getIndexReg() || SM.getImm())) {
2158       if (ExprLen)
2159         InstInfo->AsmRewrites->emplace_back(AOK_Skip, Loc, ExprLen);
2160       return;
2161     }
2162   }
2163   // Build an Intel Expression rewrite
2164   StringRef BaseRegStr;
2165   StringRef IndexRegStr;
2166   StringRef OffsetNameStr;
2167   if (SM.getBaseReg())
2168     BaseRegStr = X86IntelInstPrinter::getRegisterName(SM.getBaseReg());
2169   if (SM.getIndexReg())
2170     IndexRegStr = X86IntelInstPrinter::getRegisterName(SM.getIndexReg());
2171   if (SM.isOffsetOperator())
2172     OffsetNameStr = SM.getSymName();
2173   // Emit it
2174   IntelExpr Expr(BaseRegStr, IndexRegStr, SM.getScale(), OffsetNameStr,
2175                  SM.getImm(), SM.isMemExpr());
2176   InstInfo->AsmRewrites->emplace_back(Loc, ExprLen, Expr);
2177 }
2178 
// Inline assembly may use variable names with namespace alias qualifiers.
//
// Resolve an identifier in MS inline assembly through the frontend's
// LookupInlineAsmIdentifier callback, advance the lexer past the text the
// frontend claimed, and produce a symbol reference for it in \p Val.
// \p Identifier is updated to the resolved text.  Returns false on success
// (including the enum-value case, where \p Val stays null).
bool X86AsmParser::ParseIntelInlineAsmIdentifier(
    const MCExpr *&Val, StringRef &Identifier, InlineAsmIdentifierInfo &Info,
    bool IsUnevaluatedOperand, SMLoc &End, bool IsParsingOffsetOperator) {
  MCAsmParser &Parser = getParser();
  assert(isParsingMSInlineAsm() && "Expected to be parsing inline assembly.");
  Val = nullptr;

  // Hand the rest of the line to the frontend; presumably it narrows LineBuf
  // to exactly the source text it recognized as one identifier (the code
  // below relies on LineBuf.size() for that span) -- see callback contract.
  StringRef LineBuf(Identifier.data());
  SemaCallback->LookupInlineAsmIdentifier(LineBuf, Info, IsUnevaluatedOperand);

  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();

  // Advance the token stream until the end of the current token is
  // after the end of what the frontend claimed.
  const char *EndPtr = Tok.getLoc().getPointer() + LineBuf.size();
  do {
    End = Tok.getEndLoc();
    getLexer().Lex();
  } while (End.getPointer() < EndPtr);
  Identifier = LineBuf;

  // The frontend should end parsing on an assembler token boundary, unless it
  // failed parsing.
  assert((End.getPointer() == EndPtr ||
          Info.isKind(InlineAsmIdentifierInfo::IK_Invalid)) &&
          "frontend claimed part of a token?");

  // If the identifier lookup was unsuccessful, assume that we are dealing with
  // a label.
  if (Info.isKind(InlineAsmIdentifierInfo::IK_Invalid)) {
    StringRef InternalName =
      SemaCallback->LookupInlineAsmLabel(Identifier, getSourceManager(),
                                         Loc, false);
    assert(InternalName.size() && "We should have an internal name here.");
    // Push a rewrite for replacing the identifier name with the internal name,
    // unless we are parsing the operand of an offset operator
    if (!IsParsingOffsetOperator)
      InstInfo->AsmRewrites->emplace_back(AOK_Label, Loc, Identifier.size(),
                                          InternalName);
    else
      Identifier = InternalName;
  } else if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal))
    return false;
  // Create the symbol reference.
  MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
  MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
  Val = MCSymbolRefExpr::create(Sym, Variant, getParser().getContext());
  return false;
}
2230 
2231 //ParseRoundingModeOp - Parse AVX-512 rounding mode operand
2232 bool X86AsmParser::ParseRoundingModeOp(SMLoc Start, OperandVector &Operands) {
2233   MCAsmParser &Parser = getParser();
2234   const AsmToken &Tok = Parser.getTok();
2235   // Eat "{" and mark the current place.
2236   const SMLoc consumedToken = consumeToken();
2237   if (Tok.isNot(AsmToken::Identifier))
2238     return Error(Tok.getLoc(), "Expected an identifier after {");
2239   if (Tok.getIdentifier().startswith("r")){
2240     int rndMode = StringSwitch<int>(Tok.getIdentifier())
2241       .Case("rn", X86::STATIC_ROUNDING::TO_NEAREST_INT)
2242       .Case("rd", X86::STATIC_ROUNDING::TO_NEG_INF)
2243       .Case("ru", X86::STATIC_ROUNDING::TO_POS_INF)
2244       .Case("rz", X86::STATIC_ROUNDING::TO_ZERO)
2245       .Default(-1);
2246     if (-1 == rndMode)
2247       return Error(Tok.getLoc(), "Invalid rounding mode.");
2248      Parser.Lex();  // Eat "r*" of r*-sae
2249     if (!getLexer().is(AsmToken::Minus))
2250       return Error(Tok.getLoc(), "Expected - at this point");
2251     Parser.Lex();  // Eat "-"
2252     Parser.Lex();  // Eat the sae
2253     if (!getLexer().is(AsmToken::RCurly))
2254       return Error(Tok.getLoc(), "Expected } at this point");
2255     SMLoc End = Tok.getEndLoc();
2256     Parser.Lex();  // Eat "}"
2257     const MCExpr *RndModeOp =
2258       MCConstantExpr::create(rndMode, Parser.getContext());
2259     Operands.push_back(X86Operand::CreateImm(RndModeOp, Start, End));
2260     return false;
2261   }
2262   if(Tok.getIdentifier().equals("sae")){
2263     Parser.Lex();  // Eat the sae
2264     if (!getLexer().is(AsmToken::RCurly))
2265       return Error(Tok.getLoc(), "Expected } at this point");
2266     Parser.Lex();  // Eat "}"
2267     Operands.push_back(X86Operand::CreateToken("{sae}", consumedToken));
2268     return false;
2269   }
2270   return Error(Tok.getLoc(), "unknown token in expression");
2271 }
2272 
/// Parse the '.' operator.
///
/// Handles both the numeric form ("[ebx].0", lexed as a Real token) and the
/// field-reference form ("<base>.<member>..."), folding the resulting byte
/// offset (and type, when known) into the expression state machine.
bool X86AsmParser::ParseIntelDotOperator(IntelExprStateMachine &SM,
                                         SMLoc &End) {
  const AsmToken &Tok = getTok();
  AsmFieldInfo Info;

  // Drop the optional '.'.
  StringRef DotDispStr = Tok.getString();
  if (DotDispStr.startswith("."))
    DotDispStr = DotDispStr.drop_front(1);
  StringRef TrailingDot;

  // .Imm gets lexed as a real.
  if (Tok.is(AsmToken::Real)) {
    APInt DotDisp;
    DotDispStr.getAsInteger(10, DotDisp);
    Info.Offset = DotDisp.getZExtValue();
  } else if ((isParsingMSInlineAsm() || getParser().isParsingMasm()) &&
             Tok.is(AsmToken::Identifier)) {
    // A trailing dot belongs to the next expression component; strip it here
    // and push it back onto the lexer once the lookup is done.
    if (DotDispStr.endswith(".")) {
      TrailingDot = DotDispStr.substr(DotDispStr.size() - 1);
      DotDispStr = DotDispStr.drop_back(1);
    }
    // Try, in order: the current type context, the symbol's own type, a
    // global field lookup, and finally the frontend callback.
    const std::pair<StringRef, StringRef> BaseMember = DotDispStr.split('.');
    const StringRef Base = BaseMember.first, Member = BaseMember.second;
    if (getParser().lookUpField(SM.getType(), DotDispStr, Info) &&
        getParser().lookUpField(SM.getSymName(), DotDispStr, Info) &&
        getParser().lookUpField(DotDispStr, Info) &&
        (!SemaCallback ||
         SemaCallback->LookupInlineAsmField(Base, Member, Info.Offset)))
      return Error(Tok.getLoc(), "Unable to lookup field reference!");
  } else {
    return Error(Tok.getLoc(), "Unexpected token type!");
  }

  // Eat the DotExpression and update End
  End = SMLoc::getFromPointer(DotDispStr.data());
  const char *DotExprEndLoc = DotDispStr.data() + DotDispStr.size();
  while (Tok.getLoc().getPointer() < DotExprEndLoc)
    Lex();
  if (!TrailingDot.empty())
    getLexer().UnLex(AsmToken(AsmToken::Dot, TrailingDot));
  SM.addImm(Info.Offset);
  SM.setTypeInfo(Info.Type);
  return false;
}
2319 
2320 /// Parse the 'offset' operator.
2321 /// This operator is used to specify the location of a given operand
2322 bool X86AsmParser::ParseIntelOffsetOperator(const MCExpr *&Val, StringRef &ID,
2323                                             InlineAsmIdentifierInfo &Info,
2324                                             SMLoc &End) {
2325   // Eat offset, mark start of identifier.
2326   SMLoc Start = Lex().getLoc();
2327   ID = getTok().getString();
2328   if (!isParsingMSInlineAsm()) {
2329     if ((getTok().isNot(AsmToken::Identifier) &&
2330          getTok().isNot(AsmToken::String)) ||
2331         getParser().parsePrimaryExpr(Val, End, nullptr))
2332       return Error(Start, "unexpected token!");
2333   } else if (ParseIntelInlineAsmIdentifier(Val, ID, Info, false, End, true)) {
2334     return Error(Start, "unable to lookup expression");
2335   } else if (Info.isKind(InlineAsmIdentifierInfo::IK_EnumVal)) {
2336     return Error(Start, "offset operator cannot yet handle constants");
2337   }
2338   return false;
2339 }
2340 
2341 // Query a candidate string for being an Intel assembly operator
2342 // Report back its kind, or IOK_INVALID if does not evaluated as a known one
2343 unsigned X86AsmParser::IdentifyIntelInlineAsmOperator(StringRef Name) {
2344   return StringSwitch<unsigned>(Name)
2345     .Cases("TYPE","type",IOK_TYPE)
2346     .Cases("SIZE","size",IOK_SIZE)
2347     .Cases("LENGTH","length",IOK_LENGTH)
2348     .Default(IOK_INVALID);
2349 }
2350 
2351 /// Parse the 'LENGTH', 'TYPE' and 'SIZE' operators.  The LENGTH operator
2352 /// returns the number of elements in an array.  It returns the value 1 for
2353 /// non-array variables.  The SIZE operator returns the size of a C or C++
2354 /// variable.  A variable's size is the product of its LENGTH and TYPE.  The
2355 /// TYPE operator returns the size of a C or C++ type or variable. If the
2356 /// variable is an array, TYPE returns the size of a single element.
2357 unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
2358   MCAsmParser &Parser = getParser();
2359   const AsmToken &Tok = Parser.getTok();
2360   Parser.Lex(); // Eat operator.
2361 
2362   const MCExpr *Val = nullptr;
2363   InlineAsmIdentifierInfo Info;
2364   SMLoc Start = Tok.getLoc(), End;
2365   StringRef Identifier = Tok.getString();
2366   if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
2367                                     /*IsUnevaluatedOperand=*/true, End))
2368     return 0;
2369 
2370   if (!Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
2371     Error(Start, "unable to lookup expression");
2372     return 0;
2373   }
2374 
2375   unsigned CVal = 0;
2376   switch(OpKind) {
2377   default: llvm_unreachable("Unexpected operand kind!");
2378   case IOK_LENGTH: CVal = Info.Var.Length; break;
2379   case IOK_SIZE: CVal = Info.Var.Size; break;
2380   case IOK_TYPE: CVal = Info.Var.Type; break;
2381   }
2382 
2383   return CVal;
2384 }
2385 
2386 // Query a candidate string for being an Intel assembly operator
2387 // Report back its kind, or IOK_INVALID if does not evaluated as a known one
2388 unsigned X86AsmParser::IdentifyMasmOperator(StringRef Name) {
2389   return StringSwitch<unsigned>(Name.lower())
2390       .Case("type", MOK_TYPE)
2391       .Cases("size", "sizeof", MOK_SIZEOF)
2392       .Cases("length", "lengthof", MOK_LENGTHOF)
2393       .Default(MOK_INVALID);
2394 }
2395 
2396 /// Parse the 'LENGTHOF', 'SIZEOF', and 'TYPE' operators.  The LENGTHOF operator
2397 /// returns the number of elements in an array.  It returns the value 1 for
2398 /// non-array variables.  The SIZEOF operator returns the size of a type or
2399 /// variable in bytes.  A variable's size is the product of its LENGTH and TYPE.
2400 /// The TYPE operator returns the size of a variable. If the variable is an
2401 /// array, TYPE returns the size of a single element.
2402 bool X86AsmParser::ParseMasmOperator(unsigned OpKind, int64_t &Val) {
2403   MCAsmParser &Parser = getParser();
2404   SMLoc OpLoc = Parser.getTok().getLoc();
2405   Parser.Lex(); // Eat operator.
2406 
2407   Val = 0;
2408   if (OpKind == MOK_SIZEOF || OpKind == MOK_TYPE) {
2409     // Check for SIZEOF(<type>) and TYPE(<type>).
2410     bool InParens = Parser.getTok().is(AsmToken::LParen);
2411     const AsmToken &IDTok = InParens ? getLexer().peekTok() : Parser.getTok();
2412     AsmTypeInfo Type;
2413     if (IDTok.is(AsmToken::Identifier) &&
2414         !Parser.lookUpType(IDTok.getIdentifier(), Type)) {
2415       Val = Type.Size;
2416 
2417       // Eat tokens.
2418       if (InParens)
2419         parseToken(AsmToken::LParen);
2420       parseToken(AsmToken::Identifier);
2421       if (InParens)
2422         parseToken(AsmToken::RParen);
2423     }
2424   }
2425 
2426   if (!Val) {
2427     IntelExprStateMachine SM;
2428     SMLoc End, Start = Parser.getTok().getLoc();
2429     if (ParseIntelExpression(SM, End))
2430       return true;
2431 
2432     switch (OpKind) {
2433     default:
2434       llvm_unreachable("Unexpected operand kind!");
2435     case MOK_SIZEOF:
2436       Val = SM.getSize();
2437       break;
2438     case MOK_LENGTHOF:
2439       Val = SM.getLength();
2440       break;
2441     case MOK_TYPE:
2442       Val = SM.getElementSize();
2443       break;
2444     }
2445 
2446     if (!Val)
2447       return Error(OpLoc, "expression has unknown type", SMRange(Start, End));
2448   }
2449 
2450   return false;
2451 }
2452 
2453 bool X86AsmParser::ParseIntelMemoryOperandSize(unsigned &Size) {
2454   Size = StringSwitch<unsigned>(getTok().getString())
2455     .Cases("BYTE", "byte", 8)
2456     .Cases("WORD", "word", 16)
2457     .Cases("DWORD", "dword", 32)
2458     .Cases("FLOAT", "float", 32)
2459     .Cases("LONG", "long", 32)
2460     .Cases("FWORD", "fword", 48)
2461     .Cases("DOUBLE", "double", 64)
2462     .Cases("QWORD", "qword", 64)
2463     .Cases("MMWORD","mmword", 64)
2464     .Cases("XWORD", "xword", 80)
2465     .Cases("TBYTE", "tbyte", 80)
2466     .Cases("XMMWORD", "xmmword", 128)
2467     .Cases("YMMWORD", "ymmword", 256)
2468     .Cases("ZMMWORD", "zmmword", 512)
2469     .Default(0);
2470   if (Size) {
2471     const AsmToken &Tok = Lex(); // Eat operand size (e.g., byte, word).
2472     if (!(Tok.getString().equals("PTR") || Tok.getString().equals("ptr")))
2473       return Error(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");
2474     Lex(); // Eat ptr.
2475   }
2476   return false;
2477 }
2478 
// Parse a single Intel-syntax operand: a register, an immediate, a rounding
// mode specifier ({...}), or a (possibly segment-overridden) memory
// reference. On success appends the parsed operand(s) to \p Operands and
// returns false; on failure emits a diagnostic and returns true.
bool X86AsmParser::ParseIntelOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  SMLoc Start, End;

  // Parse optional Size directive.
  unsigned Size;
  if (ParseIntelMemoryOperandSize(Size))
    return true;
  // A nonzero Size means an explicit "<size> ptr" prefix was present, which
  // requires the operand to be a memory reference.
  bool PtrInOperand = bool(Size);

  Start = Tok.getLoc();

  // Rounding mode operand.
  if (getLexer().is(AsmToken::LCurly))
    return ParseRoundingModeOp(Start, Operands);

  // Register operand.
  unsigned RegNo = 0;
  if (Tok.is(AsmToken::Identifier) && !ParseRegister(RegNo, Start, End)) {
    if (RegNo == X86::RIP)
      return Error(Start, "rip can only be used as a base register");
    // A Register followed by ':' is considered a segment override
    if (Tok.isNot(AsmToken::Colon)) {
      if (PtrInOperand)
        return Error(Start, "expected memory operand after 'ptr', "
                            "found register operand instead");
      Operands.push_back(X86Operand::CreateReg(RegNo, Start, End));
      return false;
    }
    // An alleged segment override. check if we have a valid segment register
    if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo))
      return Error(Start, "invalid segment register");
    // Eat ':' and update Start location
    Start = Lex().getLoc();
  }

  // Immediates and Memory
  IntelExprStateMachine SM;
  if (ParseIntelExpression(SM, End))
    return true;

  if (isParsingMSInlineAsm())
    RewriteIntelExpression(SM, Start, Tok.getLoc());

  // Fold the symbolic part and the constant part of the parsed expression
  // into a single displacement expression.
  int64_t Imm = SM.getImm();
  const MCExpr *Disp = SM.getSym();
  const MCExpr *ImmDisp = MCConstantExpr::create(Imm, getContext());
  if (Disp && Imm)
    Disp = MCBinaryExpr::createAdd(Disp, ImmDisp, getContext());
  if (!Disp)
    Disp = ImmDisp;

  // RegNo != 0 specifies a valid segment register,
  // and we are parsing a segment override
  if (!SM.isMemExpr() && !RegNo) {
    if (isParsingMSInlineAsm() && SM.isOffsetOperator()) {
      const InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
      if (Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {
        // Disp includes the address of a variable; make sure this is recorded
        // for later handling.
        Operands.push_back(X86Operand::CreateImm(Disp, Start, End,
                                                 SM.getSymName(), Info.Var.Decl,
                                                 Info.Var.IsGlobalLV));
        return false;
      }
    }

    // Plain immediate operand.
    Operands.push_back(X86Operand::CreateImm(Disp, Start, End));
    return false;
  }

  StringRef ErrMsg;
  unsigned BaseReg = SM.getBaseReg();
  unsigned IndexReg = SM.getIndexReg();
  unsigned Scale = SM.getScale();
  // Without an explicit "ptr" size, derive the operand size in bits from the
  // element size recorded by the expression state machine.
  if (!PtrInOperand)
    Size = SM.getElementSize() << 3;

  // ESP/RSP cannot serve as an index register; when no explicit scale pinned
  // the current assignment (Scale == 0), move it into the base position.
  if (Scale == 0 && BaseReg != X86::ESP && BaseReg != X86::RSP &&
      (IndexReg == X86::ESP || IndexReg == X86::RSP))
    std::swap(BaseReg, IndexReg);

  // If BaseReg is a vector register and IndexReg is not, swap them unless
  // Scale was specified in which case it would be an error.
  if (Scale == 0 &&
      !(X86MCRegisterClasses[X86::VR128XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR256XRegClassID].contains(IndexReg) ||
        X86MCRegisterClasses[X86::VR512RegClassID].contains(IndexReg)) &&
      (X86MCRegisterClasses[X86::VR128XRegClassID].contains(BaseReg) ||
       X86MCRegisterClasses[X86::VR256XRegClassID].contains(BaseReg) ||
       X86MCRegisterClasses[X86::VR512RegClassID].contains(BaseReg)))
    std::swap(BaseReg, IndexReg);

  // An explicit scale is never legal with a 16-bit index register.
  if (Scale != 0 &&
      X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg))
    return Error(Start, "16-bit addresses cannot have a scale");

  // If there was no explicit scale specified, change it to 1.
  if (Scale == 0)
    Scale = 1;

  // If this is a 16-bit addressing mode with the base and index in the wrong
  // order, swap them so CheckBaseRegAndIndexRegAndScale doesn't fail. It is
  // shared with att syntax where order matters.
  if ((BaseReg == X86::SI || BaseReg == X86::DI) &&
      (IndexReg == X86::BX || IndexReg == X86::BP))
    std::swap(BaseReg, IndexReg);

  if ((BaseReg || IndexReg) &&
      CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
                                      ErrMsg))
    return Error(Start, ErrMsg);
  if (isParsingMSInlineAsm())
    return CreateMemForMSInlineAsm(RegNo, Disp, BaseReg, IndexReg, Scale, Start,
                                   End, Size, SM.getSymName(),
                                   SM.getIdentifierInfo(), Operands);

  // When parsing x64 MS-style assembly, all non-absolute references to a named
  // variable default to RIP-relative.
  if (Parser.isParsingMasm() && is64BitMode() && SM.getElementSize() > 0) {
    Operands.push_back(X86Operand::CreateMem(getPointerWidth(), RegNo, Disp,
                                             BaseReg, IndexReg, Scale, Start,
                                             End, Size,
                                             /*DefaultBaseReg=*/X86::RIP));
    return false;
  }

  // Emit the memory operand; use the displacement-only form when no segment,
  // base, or index register is involved.
  if ((BaseReg || IndexReg || RegNo))
    Operands.push_back(X86Operand::CreateMem(getPointerWidth(), RegNo, Disp,
                                             BaseReg, IndexReg, Scale, Start,
                                             End, Size));
  else
    Operands.push_back(
        X86Operand::CreateMem(getPointerWidth(), Disp, Start, End, Size));
  return false;
}
2616 
// Parse a single AT&T-syntax operand: a '$'-prefixed immediate, a rounding
// mode specifier ({...}), a register, or a memory reference. On success
// appends the operand(s) to \p Operands and returns false; on failure emits
// a diagnostic and returns true.
bool X86AsmParser::ParseATTOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  switch (getLexer().getKind()) {
  case AsmToken::Dollar: {
    // $42 or $ID -> immediate.
    SMLoc Start = Parser.getTok().getLoc(), End;
    Parser.Lex();
    const MCExpr *Val;
    // This is an immediate, so we should not parse a register. Do a precheck
    // for '%' to supersede intra-register parse errors.
    SMLoc L = Parser.getTok().getLoc();
    if (check(getLexer().is(AsmToken::Percent), L,
              "expected immediate expression") ||
        getParser().parseExpression(Val, End) ||
        check(isa<X86MCExpr>(Val), L, "expected immediate expression"))
      return true;
    Operands.push_back(X86Operand::CreateImm(Val, Start, End));
    return false;
  }
  case AsmToken::LCurly: {
    // '{' starts a rounding-mode operand (e.g. {rn-sae}).
    SMLoc Start = Parser.getTok().getLoc();
    return ParseRoundingModeOp(Start, Operands);
  }
  default: {
    // This a memory operand or a register. We have some parsing complications
    // as a '(' may be part of an immediate expression or the addressing mode
    // block. This is complicated by the fact that an assembler-level variable
    // may refer either to a register or an immediate expression.

    SMLoc Loc = Parser.getTok().getLoc(), EndLoc;
    const MCExpr *Expr = nullptr;
    unsigned Reg = 0;
    if (getLexer().isNot(AsmToken::LParen)) {
      // No '(' so this is either a displacement expression or a register.
      if (Parser.parseExpression(Expr, EndLoc))
        return true;
      if (auto *RE = dyn_cast<X86MCExpr>(Expr)) {
        // Segment Register. Reset Expr and copy value to register.
        Expr = nullptr;
        Reg = RE->getRegNo();

        // Sanity check register.
        if (Reg == X86::EIZ || Reg == X86::RIZ)
          return Error(
              Loc, "%eiz and %riz can only be used as index registers",
              SMRange(Loc, EndLoc));
        if (Reg == X86::RIP)
          return Error(Loc, "%rip can only be used as a base register",
                       SMRange(Loc, EndLoc));
        // Return registers that are not segment prefixes immediately.
        if (!Parser.parseOptionalToken(AsmToken::Colon)) {
          Operands.push_back(X86Operand::CreateReg(Reg, Loc, EndLoc));
          return false;
        }
        if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg))
          return Error(Loc, "invalid segment register");
        // Accept a '*' absolute memory reference after the segment. Place it
        // before the full memory operand.
        if (getLexer().is(AsmToken::Star))
          Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
      }
    }
    // This is a Memory operand.
    return ParseMemOperand(Reg, Expr, Loc, EndLoc, Operands);
  }
  }
}
2684 
2685 // X86::COND_INVALID if not a recognized condition code or alternate mnemonic,
2686 // otherwise the EFLAGS Condition Code enumerator.
2687 X86::CondCode X86AsmParser::ParseConditionCode(StringRef CC) {
2688   return StringSwitch<X86::CondCode>(CC)
2689       .Case("o", X86::COND_O)          // Overflow
2690       .Case("no", X86::COND_NO)        // No Overflow
2691       .Cases("b", "nae", X86::COND_B)  // Below/Neither Above nor Equal
2692       .Cases("ae", "nb", X86::COND_AE) // Above or Equal/Not Below
2693       .Cases("e", "z", X86::COND_E)    // Equal/Zero
2694       .Cases("ne", "nz", X86::COND_NE) // Not Equal/Not Zero
2695       .Cases("be", "na", X86::COND_BE) // Below or Equal/Not Above
2696       .Cases("a", "nbe", X86::COND_A)  // Above/Neither Below nor Equal
2697       .Case("s", X86::COND_S)          // Sign
2698       .Case("ns", X86::COND_NS)        // No Sign
2699       .Cases("p", "pe", X86::COND_P)   // Parity/Parity Even
2700       .Cases("np", "po", X86::COND_NP) // No Parity/Parity Odd
2701       .Cases("l", "nge", X86::COND_L)  // Less/Neither Greater nor Equal
2702       .Cases("ge", "nl", X86::COND_GE) // Greater or Equal/Not Less
2703       .Cases("le", "ng", X86::COND_LE) // Less or Equal/Not Greater
2704       .Cases("g", "nle", X86::COND_G)  // Greater/Neither Less nor Equal
2705       .Default(X86::COND_INVALID);
2706 }
2707 
2708 // true on failure, false otherwise
2709 // If no {z} mark was found - Parser doesn't advance
2710 bool X86AsmParser::ParseZ(std::unique_ptr<X86Operand> &Z,
2711                           const SMLoc &StartLoc) {
2712   MCAsmParser &Parser = getParser();
2713   // Assuming we are just pass the '{' mark, quering the next token
2714   // Searched for {z}, but none was found. Return false, as no parsing error was
2715   // encountered
2716   if (!(getLexer().is(AsmToken::Identifier) &&
2717         (getLexer().getTok().getIdentifier() == "z")))
2718     return false;
2719   Parser.Lex(); // Eat z
2720   // Query and eat the '}' mark
2721   if (!getLexer().is(AsmToken::RCurly))
2722     return Error(getLexer().getLoc(), "Expected } at this point");
2723   Parser.Lex(); // Eat '}'
2724   // Assign Z with the {z} mark opernad
2725   Z = X86Operand::CreateToken("{z}", StartLoc);
2726   return false;
2727 }
2728 
2729 // true on failure, false otherwise
// Parse optional AVX-512 decorations that may follow an operand: either a
// memory-broadcast marker ({1to<NUM>}) or an op-mask register ({%k<NUM>}),
// the latter optionally combined with a zeroing marker ({z}) in either
// order. Returns true on failure, false otherwise.
bool X86AsmParser::HandleAVX512Operand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (getLexer().is(AsmToken::LCurly)) {
    // Eat "{" and mark the current place.
    const SMLoc consumedToken = consumeToken();
    // Distinguish {1to<NUM>} from {%k<NUM>}.
    if(getLexer().is(AsmToken::Integer)) {
      // Parse memory broadcasting ({1to<NUM>}).
      if (getLexer().getTok().getIntVal() != 1)
        return TokError("Expected 1to<NUM> at this point");
      StringRef Prefix = getLexer().getTok().getString();
      Parser.Lex(); // Eat first token of 1to8
      if (!getLexer().is(AsmToken::Identifier))
        return TokError("Expected 1to<NUM> at this point");
      // Recognize only reasonable suffixes.
      // The lexer splits e.g. "1to8" into the integer "1" and the identifier
      // "to8"; rejoin the two pieces before matching.
      SmallVector<char, 5> BroadcastVector;
      StringRef BroadcastString = (Prefix + getLexer().getTok().getIdentifier())
                                      .toStringRef(BroadcastVector);
      if (!BroadcastString.startswith("1to"))
        return TokError("Expected 1to<NUM> at this point");
      const char *BroadcastPrimitive =
          StringSwitch<const char *>(BroadcastString)
              .Case("1to2", "{1to2}")
              .Case("1to4", "{1to4}")
              .Case("1to8", "{1to8}")
              .Case("1to16", "{1to16}")
              .Default(nullptr);
      if (!BroadcastPrimitive)
        return TokError("Invalid memory broadcast primitive.");
      Parser.Lex(); // Eat trailing token of 1toN
      if (!getLexer().is(AsmToken::RCurly))
        return TokError("Expected } at this point");
      Parser.Lex();  // Eat "}"
      Operands.push_back(X86Operand::CreateToken(BroadcastPrimitive,
                                                 consumedToken));
      // No AVX512 specific primitives can pass
      // after memory broadcasting, so return.
      return false;
    } else {
      // Parse either {k}{z}, {z}{k}, {k} or {z}
      // The last one has no meaning, but GCC accepts it.
      // Currently, we're just past a '{' mark.
      std::unique_ptr<X86Operand> Z;
      if (ParseZ(Z, consumedToken))
        return true;
      // Reaching here means that parsing of the alleged '{z}' mark yielded
      // no errors.
      // Query for the need of further parsing for a {%k<NUM>} mark
      if (!Z || getLexer().is(AsmToken::LCurly)) {
        SMLoc StartLoc = Z ? consumeToken() : consumedToken;
        // Parse an op-mask register mark ({%k<NUM>}), which is now to be
        // expected
        unsigned RegNo;
        SMLoc RegLoc;
        if (!ParseRegister(RegNo, RegLoc, StartLoc) &&
            X86MCRegisterClasses[X86::VK1RegClassID].contains(RegNo)) {
          if (RegNo == X86::K0)
            return Error(RegLoc, "Register k0 can't be used as write mask");
          if (!getLexer().is(AsmToken::RCurly))
            return Error(getLexer().getLoc(), "Expected } at this point");
          Operands.push_back(X86Operand::CreateToken("{", StartLoc));
          Operands.push_back(
              X86Operand::CreateReg(RegNo, StartLoc, StartLoc));
          Operands.push_back(X86Operand::CreateToken("}", consumeToken()));
        } else
          return Error(getLexer().getLoc(),
                        "Expected an op-mask register at this point");
        // {%k<NUM>} mark is found, inquire for {z}
        if (getLexer().is(AsmToken::LCurly) && !Z) {
          // If we found a parsing error, or found no (expected) {z} mark,
          // report an error.
          if (ParseZ(Z, consumeToken()) || !Z)
            return Error(getLexer().getLoc(),
                         "Expected a {z} mark at this point");

        }
        // '{z}' on its own is meaningless, hence should be ignored.
        // On the contrary - had it been accompanied by a K register,
        // allow it.
        if (Z)
          Operands.push_back(std::move(Z));
      }
    }
  }
  return false;
}
2816 
2817 /// ParseMemOperand: 'seg : disp(basereg, indexreg, scale)'.  The '%ds:' prefix
2818 /// has already been parsed if present. disp may be provided as well.
bool X86AsmParser::ParseMemOperand(unsigned SegReg, const MCExpr *Disp,
                                   SMLoc StartLoc, SMLoc EndLoc,
                                   OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc Loc;
  // Based on the values passed in, we are in one of these cases
  // (with current position (*)):

  //   1. seg : * disp  (base-index-scale-expr)
  //   2. seg : *(disp) (base-index-scale-expr)
  //   3. seg :       *(base-index-scale-expr)
  //   4.        disp  *(base-index-scale-expr)
  //   5.      *(disp)  (base-index-scale-expr)
  //   6.             *(base-index-scale-expr)
  //   7.  disp *
  //   8. *(disp)

  // If we do not have a displacement yet, check if we're in cases 4 or 6 by
  // checking if the first object after the parenthesis is a register (or an
  // identifier referring to a register) and parse the displacement or default
  // to 0 as appropriate.
  auto isAtMemOperand = [this]() {
    if (this->getLexer().isNot(AsmToken::LParen))
      return false;
    // Peek (without consuming) at up to two tokens past the '('.
    AsmToken Buf[2];
    StringRef Id;
    auto TokCount = this->getLexer().peekTokens(Buf, true);
    if (TokCount == 0)
      return false;
    switch (Buf[0].getKind()) {
    case AsmToken::Percent:
    case AsmToken::Comma:
      return true;
    // These lower cases are doing a peekIdentifier.
    case AsmToken::At:
    case AsmToken::Dollar:
      // Reconstruct an identifier like "$foo" only when the sigil and the
      // following identifier/string are adjacent in the source buffer.
      if ((TokCount > 1) &&
          (Buf[1].is(AsmToken::Identifier) || Buf[1].is(AsmToken::String)) &&
          (Buf[0].getLoc().getPointer() + 1 == Buf[1].getLoc().getPointer()))
        Id = StringRef(Buf[0].getLoc().getPointer(),
                       Buf[1].getIdentifier().size() + 1);
      break;
    case AsmToken::Identifier:
    case AsmToken::String:
      Id = Buf[0].getIdentifier();
      break;
    default:
      return false;
    }
    // We have an ID. Check if it is bound to a register.
    if (!Id.empty()) {
      MCSymbol *Sym = this->getContext().getOrCreateSymbol(Id);
      if (Sym->isVariable()) {
        auto V = Sym->getVariableValue(/*SetUsed*/ false);
        return isa<X86MCExpr>(V);
      }
    }
    return false;
  };

  if (!Disp) {
    // Parse immediate if we're not at a mem operand yet.
    if (!isAtMemOperand()) {
      if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(Disp, EndLoc))
        return true;
      assert(!isa<X86MCExpr>(Disp) && "Expected non-register here.");
    } else {
      // Disp is implicitly zero if we haven't parsed it yet.
      Disp = MCConstantExpr::create(0, Parser.getContext());
    }
  }

  // We are now either at the end of the operand or at the '(' at the start of a
  // base-index-scale-expr.

  if (!parseOptionalToken(AsmToken::LParen)) {
    // No '(': this is a pure displacement (with an optional segment prefix).
    if (SegReg == 0)
      Operands.push_back(
          X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc));
    else
      Operands.push_back(X86Operand::CreateMem(getPointerWidth(), SegReg, Disp,
                                               0, 0, 1, StartLoc, EndLoc));
    return false;
  }

  // If we reached here, then eat the '(' and Process
  // the rest of the memory operand.
  unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
  SMLoc BaseLoc = getLexer().getLoc();
  const MCExpr *E;
  StringRef ErrMsg;

  // Parse BaseReg if one is provided.
  if (getLexer().isNot(AsmToken::Comma) && getLexer().isNot(AsmToken::RParen)) {
    if (Parser.parseExpression(E, EndLoc) ||
        check(!isa<X86MCExpr>(E), BaseLoc, "expected register here"))
      return true;

    // Sanity check register.
    BaseReg = cast<X86MCExpr>(E)->getRegNo();
    if (BaseReg == X86::EIZ || BaseReg == X86::RIZ)
      return Error(BaseLoc, "eiz and riz can only be used as index registers",
                   SMRange(BaseLoc, EndLoc));
  }

  if (parseOptionalToken(AsmToken::Comma)) {
    // Following the comma we should have either an index register, or a scale
    // value. We don't support the latter form, but we want to parse it
    // correctly.
    //
    // Even though it would be completely consistent to support syntax like
    // "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this.
    if (getLexer().isNot(AsmToken::RParen)) {
      if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(E, EndLoc))
        return true;

      if (!isa<X86MCExpr>(E)) {
        // We've parsed an unexpected scale value instead of an index
        // register. Interpret it as an absolute.
        int64_t ScaleVal;
        if (!E->evaluateAsAbsolute(ScaleVal, getStreamer().getAssemblerPtr()))
          return Error(Loc, "expected absolute expression");
        if (ScaleVal != 1)
          Warning(Loc, "scale factor without index register is ignored");
        Scale = 1;
      } else { // IndexReg Found.
        IndexReg = cast<X86MCExpr>(E)->getRegNo();

        if (BaseReg == X86::RIP)
          return Error(Loc,
                       "%rip as base register can not have an index register");
        if (IndexReg == X86::RIP)
          return Error(Loc, "%rip is not allowed as an index register");

        if (parseOptionalToken(AsmToken::Comma)) {
          // Parse the scale amount:
          //  ::= ',' [scale-expression]

          // A scale amount without an index is ignored.
          if (getLexer().isNot(AsmToken::RParen)) {
            int64_t ScaleVal;
            if (Parser.parseTokenLoc(Loc) ||
                Parser.parseAbsoluteExpression(ScaleVal))
              return Error(Loc, "expected scale expression");
            Scale = (unsigned)ScaleVal;
            // Validate the scale amount.
            if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
                Scale != 1)
              return Error(Loc, "scale factor in 16-bit address must be 1");
            if (checkScale(Scale, ErrMsg))
              return Error(Loc, ErrMsg);
          }
        }
      }
    }
  }

  // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
  if (parseToken(AsmToken::RParen, "unexpected token in memory operand"))
    return true;

  // This is to support otherwise illegal operand (%dx) found in various
  // unofficial manuals examples (e.g. "out[s]?[bwl]? %al, (%dx)") and must now
  // be supported. Mark such DX variants separately fix only in special cases.
  if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 &&
      isa<MCConstantExpr>(Disp) &&
      cast<MCConstantExpr>(Disp)->getValue() == 0) {
    Operands.push_back(X86Operand::CreateDXReg(BaseLoc, BaseLoc));
    return false;
  }

  if (CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(),
                                      ErrMsg))
    return Error(BaseLoc, ErrMsg);

  // Emit the memory operand; use the displacement-only form when no segment,
  // base, or index register is involved.
  if (SegReg || BaseReg || IndexReg)
    Operands.push_back(X86Operand::CreateMem(getPointerWidth(), SegReg, Disp,
                                             BaseReg, IndexReg, Scale, StartLoc,
                                             EndLoc));
  else
    Operands.push_back(
        X86Operand::CreateMem(getPointerWidth(), Disp, StartLoc, EndLoc));
  return false;
}
3003 
3004 // Parse either a standard primary expression or a register.
3005 bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
3006   MCAsmParser &Parser = getParser();
3007   // See if this is a register first.
3008   if (getTok().is(AsmToken::Percent) ||
3009       (isParsingIntelSyntax() && getTok().is(AsmToken::Identifier) &&
3010        MatchRegisterName(Parser.getTok().getString()))) {
3011     SMLoc StartLoc = Parser.getTok().getLoc();
3012     unsigned RegNo;
3013     if (ParseRegister(RegNo, StartLoc, EndLoc))
3014       return true;
3015     Res = X86MCExpr::create(RegNo, Parser.getContext());
3016     return false;
3017   }
3018   return Parser.parsePrimaryExpr(Res, EndLoc, nullptr);
3019 }
3020 
3021 bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
3022                                     SMLoc NameLoc, OperandVector &Operands) {
3023   MCAsmParser &Parser = getParser();
3024   InstInfo = &Info;
3025 
3026   // Reset the forced VEX encoding.
3027   ForcedVEXEncoding = VEXEncoding_Default;
3028   ForcedDispEncoding = DispEncoding_Default;
3029 
3030   // Parse pseudo prefixes.
3031   while (1) {
3032     if (Name == "{") {
3033       if (getLexer().isNot(AsmToken::Identifier))
3034         return Error(Parser.getTok().getLoc(), "Unexpected token after '{'");
3035       std::string Prefix = Parser.getTok().getString().lower();
3036       Parser.Lex(); // Eat identifier.
3037       if (getLexer().isNot(AsmToken::RCurly))
3038         return Error(Parser.getTok().getLoc(), "Expected '}'");
3039       Parser.Lex(); // Eat curly.
3040 
3041       if (Prefix == "vex")
3042         ForcedVEXEncoding = VEXEncoding_VEX;
3043       else if (Prefix == "vex2")
3044         ForcedVEXEncoding = VEXEncoding_VEX2;
3045       else if (Prefix == "vex3")
3046         ForcedVEXEncoding = VEXEncoding_VEX3;
3047       else if (Prefix == "evex")
3048         ForcedVEXEncoding = VEXEncoding_EVEX;
3049       else if (Prefix == "disp8")
3050         ForcedDispEncoding = DispEncoding_Disp8;
3051       else if (Prefix == "disp32")
3052         ForcedDispEncoding = DispEncoding_Disp32;
3053       else
3054         return Error(NameLoc, "unknown prefix");
3055 
3056       NameLoc = Parser.getTok().getLoc();
3057       if (getLexer().is(AsmToken::LCurly)) {
3058         Parser.Lex();
3059         Name = "{";
3060       } else {
3061         if (getLexer().isNot(AsmToken::Identifier))
3062           return Error(Parser.getTok().getLoc(), "Expected identifier");
3063         // FIXME: The mnemonic won't match correctly if its not in lower case.
3064         Name = Parser.getTok().getString();
3065         Parser.Lex();
3066       }
3067       continue;
3068     }
3069     // Parse MASM style pseudo prefixes.
3070     if (isParsingMSInlineAsm()) {
3071       if (Name.equals_insensitive("vex"))
3072         ForcedVEXEncoding = VEXEncoding_VEX;
3073       else if (Name.equals_insensitive("vex2"))
3074         ForcedVEXEncoding = VEXEncoding_VEX2;
3075       else if (Name.equals_insensitive("vex3"))
3076         ForcedVEXEncoding = VEXEncoding_VEX3;
3077       else if (Name.equals_insensitive("evex"))
3078         ForcedVEXEncoding = VEXEncoding_EVEX;
3079 
3080       if (ForcedVEXEncoding != VEXEncoding_Default) {
3081         if (getLexer().isNot(AsmToken::Identifier))
3082           return Error(Parser.getTok().getLoc(), "Expected identifier");
3083         // FIXME: The mnemonic won't match correctly if its not in lower case.
3084         Name = Parser.getTok().getString();
3085         NameLoc = Parser.getTok().getLoc();
3086         Parser.Lex();
3087       }
3088     }
3089     break;
3090   }
3091 
3092   // Support the suffix syntax for overriding displacement size as well.
3093   if (Name.consume_back(".d32")) {
3094     ForcedDispEncoding = DispEncoding_Disp32;
3095   } else if (Name.consume_back(".d8")) {
3096     ForcedDispEncoding = DispEncoding_Disp8;
3097   }
3098 
3099   StringRef PatchedName = Name;
3100 
3101   // Hack to skip "short" following Jcc.
3102   if (isParsingIntelSyntax() &&
3103       (PatchedName == "jmp" || PatchedName == "jc" || PatchedName == "jnc" ||
3104        PatchedName == "jcxz" || PatchedName == "jecxz" ||
3105        (PatchedName.startswith("j") &&
3106         ParseConditionCode(PatchedName.substr(1)) != X86::COND_INVALID))) {
3107     StringRef NextTok = Parser.getTok().getString();
3108     if (Parser.isParsingMasm() ? NextTok.equals_insensitive("short")
3109                                : NextTok == "short") {
3110       SMLoc NameEndLoc =
3111           NameLoc.getFromPointer(NameLoc.getPointer() + Name.size());
3112       // Eat the short keyword.
3113       Parser.Lex();
3114       // MS and GAS ignore the short keyword; they both determine the jmp type
3115       // based on the distance of the label. (NASM does emit different code with
3116       // and without "short," though.)
3117       InstInfo->AsmRewrites->emplace_back(AOK_Skip, NameEndLoc,
3118                                           NextTok.size() + 1);
3119     }
3120   }
3121 
3122   // FIXME: Hack to recognize setneb as setne.
3123   if (PatchedName.startswith("set") && PatchedName.endswith("b") &&
3124       PatchedName != "setb" && PatchedName != "setnb")
3125     PatchedName = PatchedName.substr(0, Name.size()-1);
3126 
3127   unsigned ComparisonPredicate = ~0U;
3128 
3129   // FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
3130   if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
3131       (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
3132        PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
3133     bool IsVCMP = PatchedName[0] == 'v';
3134     unsigned CCIdx = IsVCMP ? 4 : 3;
3135     unsigned CC = StringSwitch<unsigned>(
3136       PatchedName.slice(CCIdx, PatchedName.size() - 2))
3137       .Case("eq",       0x00)
3138       .Case("eq_oq",    0x00)
3139       .Case("lt",       0x01)
3140       .Case("lt_os",    0x01)
3141       .Case("le",       0x02)
3142       .Case("le_os",    0x02)
3143       .Case("unord",    0x03)
3144       .Case("unord_q",  0x03)
3145       .Case("neq",      0x04)
3146       .Case("neq_uq",   0x04)
3147       .Case("nlt",      0x05)
3148       .Case("nlt_us",   0x05)
3149       .Case("nle",      0x06)
3150       .Case("nle_us",   0x06)
3151       .Case("ord",      0x07)
3152       .Case("ord_q",    0x07)
3153       /* AVX only from here */
3154       .Case("eq_uq",    0x08)
3155       .Case("nge",      0x09)
3156       .Case("nge_us",   0x09)
3157       .Case("ngt",      0x0A)
3158       .Case("ngt_us",   0x0A)
3159       .Case("false",    0x0B)
3160       .Case("false_oq", 0x0B)
3161       .Case("neq_oq",   0x0C)
3162       .Case("ge",       0x0D)
3163       .Case("ge_os",    0x0D)
3164       .Case("gt",       0x0E)
3165       .Case("gt_os",    0x0E)
3166       .Case("true",     0x0F)
3167       .Case("true_uq",  0x0F)
3168       .Case("eq_os",    0x10)
3169       .Case("lt_oq",    0x11)
3170       .Case("le_oq",    0x12)
3171       .Case("unord_s",  0x13)
3172       .Case("neq_us",   0x14)
3173       .Case("nlt_uq",   0x15)
3174       .Case("nle_uq",   0x16)
3175       .Case("ord_s",    0x17)
3176       .Case("eq_us",    0x18)
3177       .Case("nge_uq",   0x19)
3178       .Case("ngt_uq",   0x1A)
3179       .Case("false_os", 0x1B)
3180       .Case("neq_os",   0x1C)
3181       .Case("ge_oq",    0x1D)
3182       .Case("gt_oq",    0x1E)
3183       .Case("true_us",  0x1F)
3184       .Default(~0U);
3185     if (CC != ~0U && (IsVCMP || CC < 8)) {
3186       if (PatchedName.endswith("ss"))
3187         PatchedName = IsVCMP ? "vcmpss" : "cmpss";
3188       else if (PatchedName.endswith("sd"))
3189         PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
3190       else if (PatchedName.endswith("ps"))
3191         PatchedName = IsVCMP ? "vcmpps" : "cmpps";
3192       else if (PatchedName.endswith("pd"))
3193         PatchedName = IsVCMP ? "vcmppd" : "cmppd";
3194       else
3195         llvm_unreachable("Unexpected suffix!");
3196 
3197       ComparisonPredicate = CC;
3198     }
3199   }
3200 
3201   // FIXME: Hack to recognize vpcmp<comparison code>{ub,uw,ud,uq,b,w,d,q}.
3202   if (PatchedName.startswith("vpcmp") &&
3203       (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
3204        PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
3205     unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
3206     unsigned CC = StringSwitch<unsigned>(
3207       PatchedName.slice(5, PatchedName.size() - SuffixSize))
3208       .Case("eq",    0x0) // Only allowed on unsigned. Checked below.
3209       .Case("lt",    0x1)
3210       .Case("le",    0x2)
3211       //.Case("false", 0x3) // Not a documented alias.
3212       .Case("neq",   0x4)
3213       .Case("nlt",   0x5)
3214       .Case("nle",   0x6)
3215       //.Case("true",  0x7) // Not a documented alias.
3216       .Default(~0U);
3217     if (CC != ~0U && (CC != 0 || SuffixSize == 2)) {
3218       switch (PatchedName.back()) {
3219       default: llvm_unreachable("Unexpected character!");
3220       case 'b': PatchedName = SuffixSize == 2 ? "vpcmpub" : "vpcmpb"; break;
3221       case 'w': PatchedName = SuffixSize == 2 ? "vpcmpuw" : "vpcmpw"; break;
3222       case 'd': PatchedName = SuffixSize == 2 ? "vpcmpud" : "vpcmpd"; break;
3223       case 'q': PatchedName = SuffixSize == 2 ? "vpcmpuq" : "vpcmpq"; break;
3224       }
3225       // Set up the immediate to push into the operands later.
3226       ComparisonPredicate = CC;
3227     }
3228   }
3229 
3230   // FIXME: Hack to recognize vpcom<comparison code>{ub,uw,ud,uq,b,w,d,q}.
3231   if (PatchedName.startswith("vpcom") &&
3232       (PatchedName.back() == 'b' || PatchedName.back() == 'w' ||
3233        PatchedName.back() == 'd' || PatchedName.back() == 'q')) {
3234     unsigned SuffixSize = PatchedName.drop_back().back() == 'u' ? 2 : 1;
3235     unsigned CC = StringSwitch<unsigned>(
3236       PatchedName.slice(5, PatchedName.size() - SuffixSize))
3237       .Case("lt",    0x0)
3238       .Case("le",    0x1)
3239       .Case("gt",    0x2)
3240       .Case("ge",    0x3)
3241       .Case("eq",    0x4)
3242       .Case("neq",   0x5)
3243       .Case("false", 0x6)
3244       .Case("true",  0x7)
3245       .Default(~0U);
3246     if (CC != ~0U) {
3247       switch (PatchedName.back()) {
3248       default: llvm_unreachable("Unexpected character!");
3249       case 'b': PatchedName = SuffixSize == 2 ? "vpcomub" : "vpcomb"; break;
3250       case 'w': PatchedName = SuffixSize == 2 ? "vpcomuw" : "vpcomw"; break;
3251       case 'd': PatchedName = SuffixSize == 2 ? "vpcomud" : "vpcomd"; break;
3252       case 'q': PatchedName = SuffixSize == 2 ? "vpcomuq" : "vpcomq"; break;
3253       }
3254       // Set up the immediate to push into the operands later.
3255       ComparisonPredicate = CC;
3256     }
3257   }
3258 
3259 
3260   // Determine whether this is an instruction prefix.
3261   // FIXME:
  // Enhance prefix integrity robustness. For example, the following forms
  // are currently tolerated:
3264   // repz repnz <insn>    ; GAS errors for the use of two similar prefixes
3265   // lock addq %rax, %rbx ; Destination operand must be of memory type
3266   // xacquire <insn>      ; xacquire must be accompanied by 'lock'
3267   bool IsPrefix =
3268       StringSwitch<bool>(Name)
3269           .Cases("cs", "ds", "es", "fs", "gs", "ss", true)
3270           .Cases("rex64", "data32", "data16", "addr32", "addr16", true)
3271           .Cases("xacquire", "xrelease", true)
3272           .Cases("acquire", "release", isParsingIntelSyntax())
3273           .Default(false);
3274 
3275   auto isLockRepeatNtPrefix = [](StringRef N) {
3276     return StringSwitch<bool>(N)
3277         .Cases("lock", "rep", "repe", "repz", "repne", "repnz", "notrack", true)
3278         .Default(false);
3279   };
3280 
3281   bool CurlyAsEndOfStatement = false;
3282 
3283   unsigned Flags = X86::IP_NO_PREFIX;
3284   while (isLockRepeatNtPrefix(Name.lower())) {
3285     unsigned Prefix =
3286         StringSwitch<unsigned>(Name)
3287             .Cases("lock", "lock", X86::IP_HAS_LOCK)
3288             .Cases("rep", "repe", "repz", X86::IP_HAS_REPEAT)
3289             .Cases("repne", "repnz", X86::IP_HAS_REPEAT_NE)
3290             .Cases("notrack", "notrack", X86::IP_HAS_NOTRACK)
3291             .Default(X86::IP_NO_PREFIX); // Invalid prefix (impossible)
3292     Flags |= Prefix;
3293     if (getLexer().is(AsmToken::EndOfStatement)) {
3294       // We don't have real instr with the given prefix
3295       //  let's use the prefix as the instr.
3296       // TODO: there could be several prefixes one after another
3297       Flags = X86::IP_NO_PREFIX;
3298       break;
3299     }
3300     // FIXME: The mnemonic won't match correctly if its not in lower case.
3301     Name = Parser.getTok().getString();
3302     Parser.Lex(); // eat the prefix
3303     // Hack: we could have something like "rep # some comment" or
3304     //    "lock; cmpxchg16b $1" or "lock\0A\09incl" or "lock/incl"
3305     while (Name.startswith(";") || Name.startswith("\n") ||
3306            Name.startswith("#") || Name.startswith("\t") ||
3307            Name.startswith("/")) {
3308       // FIXME: The mnemonic won't match correctly if its not in lower case.
3309       Name = Parser.getTok().getString();
3310       Parser.Lex(); // go to next prefix or instr
3311     }
3312   }
3313 
3314   if (Flags)
3315     PatchedName = Name;
3316 
3317   // Hacks to handle 'data16' and 'data32'
3318   if (PatchedName == "data16" && is16BitMode()) {
3319     return Error(NameLoc, "redundant data16 prefix");
3320   }
3321   if (PatchedName == "data32") {
3322     if (is32BitMode())
3323       return Error(NameLoc, "redundant data32 prefix");
3324     if (is64BitMode())
3325       return Error(NameLoc, "'data32' is not supported in 64-bit mode");
3326     // Hack to 'data16' for the table lookup.
3327     PatchedName = "data16";
3328 
3329     if (getLexer().isNot(AsmToken::EndOfStatement)) {
3330       StringRef Next = Parser.getTok().getString();
3331       getLexer().Lex();
3332       // data32 effectively changes the instruction suffix.
3333       // TODO Generalize.
3334       if (Next == "callw")
3335         Next = "calll";
3336       if (Next == "ljmpw")
3337         Next = "ljmpl";
3338 
3339       Name = Next;
3340       PatchedName = Name;
3341       ForcedDataPrefix = X86::Mode32Bit;
3342       IsPrefix = false;
3343     }
3344   }
3345 
3346   Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
3347 
3348   // Push the immediate if we extracted one from the mnemonic.
3349   if (ComparisonPredicate != ~0U && !isParsingIntelSyntax()) {
3350     const MCExpr *ImmOp = MCConstantExpr::create(ComparisonPredicate,
3351                                                  getParser().getContext());
3352     Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
3353   }
3354 
3355   // This does the actual operand parsing.  Don't parse any more if we have a
3356   // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
3357   // just want to parse the "lock" as the first instruction and the "incl" as
3358   // the next one.
3359   if (getLexer().isNot(AsmToken::EndOfStatement) && !IsPrefix) {
3360     // Parse '*' modifier.
3361     if (getLexer().is(AsmToken::Star))
3362       Operands.push_back(X86Operand::CreateToken("*", consumeToken()));
3363 
3364     // Read the operands.
3365     while(1) {
3366       if (ParseOperand(Operands))
3367         return true;
3368       if (HandleAVX512Operand(Operands))
3369         return true;
3370 
3371       // check for comma and eat it
3372       if (getLexer().is(AsmToken::Comma))
3373         Parser.Lex();
3374       else
3375         break;
3376      }
3377 
3378     // In MS inline asm curly braces mark the beginning/end of a block,
    // therefore they should be interpreted as end of statement
3380     CurlyAsEndOfStatement =
3381         isParsingIntelSyntax() && isParsingMSInlineAsm() &&
3382         (getLexer().is(AsmToken::LCurly) || getLexer().is(AsmToken::RCurly));
3383     if (getLexer().isNot(AsmToken::EndOfStatement) && !CurlyAsEndOfStatement)
3384       return TokError("unexpected token in argument list");
3385   }
3386 
3387   // Push the immediate if we extracted one from the mnemonic.
3388   if (ComparisonPredicate != ~0U && isParsingIntelSyntax()) {
3389     const MCExpr *ImmOp = MCConstantExpr::create(ComparisonPredicate,
3390                                                  getParser().getContext());
3391     Operands.push_back(X86Operand::CreateImm(ImmOp, NameLoc, NameLoc));
3392   }
3393 
3394   // Consume the EndOfStatement or the prefix separator Slash
3395   if (getLexer().is(AsmToken::EndOfStatement) ||
3396       (IsPrefix && getLexer().is(AsmToken::Slash)))
3397     Parser.Lex();
3398   else if (CurlyAsEndOfStatement)
3399     // Add an actual EndOfStatement before the curly brace
3400     Info.AsmRewrites->emplace_back(AOK_EndOfStatement,
3401                                    getLexer().getTok().getLoc(), 0);
3402 
3403   // This is for gas compatibility and cannot be done in td.
3404   // Adding "p" for some floating point with no argument.
3405   // For example: fsub --> fsubp
3406   bool IsFp =
3407     Name == "fsub" || Name == "fdiv" || Name == "fsubr" || Name == "fdivr";
3408   if (IsFp && Operands.size() == 1) {
3409     const char *Repl = StringSwitch<const char *>(Name)
3410       .Case("fsub", "fsubp")
3411       .Case("fdiv", "fdivp")
3412       .Case("fsubr", "fsubrp")
3413       .Case("fdivr", "fdivrp");
3414     static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
3415   }
3416 
3417   if ((Name == "mov" || Name == "movw" || Name == "movl") &&
3418       (Operands.size() == 3)) {
3419     X86Operand &Op1 = (X86Operand &)*Operands[1];
3420     X86Operand &Op2 = (X86Operand &)*Operands[2];
3421     SMLoc Loc = Op1.getEndLoc();
3422     // Moving a 32 or 16 bit value into a segment register has the same
3423     // behavior. Modify such instructions to always take shorter form.
3424     if (Op1.isReg() && Op2.isReg() &&
3425         X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(
3426             Op2.getReg()) &&
3427         (X86MCRegisterClasses[X86::GR16RegClassID].contains(Op1.getReg()) ||
3428          X86MCRegisterClasses[X86::GR32RegClassID].contains(Op1.getReg()))) {
3429       // Change instruction name to match new instruction.
3430       if (Name != "mov" && Name[3] == (is16BitMode() ? 'l' : 'w')) {
3431         Name = is16BitMode() ? "movw" : "movl";
3432         Operands[0] = X86Operand::CreateToken(Name, NameLoc);
3433       }
3434       // Select the correct equivalent 16-/32-bit source register.
3435       unsigned Reg =
3436           getX86SubSuperRegisterOrZero(Op1.getReg(), is16BitMode() ? 16 : 32);
3437       Operands[1] = X86Operand::CreateReg(Reg, Loc, Loc);
3438     }
3439   }
3440 
3441   // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" ->
3442   // "outb %al, %dx".  Out doesn't take a memory form, but this is a widely
3443   // documented form in various unofficial manuals, so a lot of code uses it.
3444   if ((Name == "outb" || Name == "outsb" || Name == "outw" || Name == "outsw" ||
3445        Name == "outl" || Name == "outsl" || Name == "out" || Name == "outs") &&
3446       Operands.size() == 3) {
3447     X86Operand &Op = (X86Operand &)*Operands.back();
3448     if (Op.isDXReg())
3449       Operands.back() = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
3450                                               Op.getEndLoc());
3451   }
3452   // Same hack for "in[s]?[bwl]? (%dx), %al" -> "inb %dx, %al".
3453   if ((Name == "inb" || Name == "insb" || Name == "inw" || Name == "insw" ||
3454        Name == "inl" || Name == "insl" || Name == "in" || Name == "ins") &&
3455       Operands.size() == 3) {
3456     X86Operand &Op = (X86Operand &)*Operands[1];
3457     if (Op.isDXReg())
3458       Operands[1] = X86Operand::CreateReg(X86::DX, Op.getStartLoc(),
3459                                           Op.getEndLoc());
3460   }
3461 
3462   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 2> TmpOperands;
3463   bool HadVerifyError = false;
3464 
3465   // Append default arguments to "ins[bwld]"
3466   if (Name.startswith("ins") &&
3467       (Operands.size() == 1 || Operands.size() == 3) &&
3468       (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd" ||
3469        Name == "ins")) {
3470 
3471     AddDefaultSrcDestOperands(TmpOperands,
3472                               X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
3473                               DefaultMemDIOperand(NameLoc));
3474     HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3475   }
3476 
3477   // Append default arguments to "outs[bwld]"
3478   if (Name.startswith("outs") &&
3479       (Operands.size() == 1 || Operands.size() == 3) &&
3480       (Name == "outsb" || Name == "outsw" || Name == "outsl" ||
3481        Name == "outsd" || Name == "outs")) {
3482     AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
3483                               X86Operand::CreateReg(X86::DX, NameLoc, NameLoc));
3484     HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3485   }
3486 
3487   // Transform "lods[bwlq]" into "lods[bwlq] ($SIREG)" for appropriate
3488   // values of $SIREG according to the mode. It would be nice if this
3489   // could be achieved with InstAlias in the tables.
3490   if (Name.startswith("lods") &&
3491       (Operands.size() == 1 || Operands.size() == 2) &&
3492       (Name == "lods" || Name == "lodsb" || Name == "lodsw" ||
3493        Name == "lodsl" || Name == "lodsd" || Name == "lodsq")) {
3494     TmpOperands.push_back(DefaultMemSIOperand(NameLoc));
3495     HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3496   }
3497 
3498   // Transform "stos[bwlq]" into "stos[bwlq] ($DIREG)" for appropriate
3499   // values of $DIREG according to the mode. It would be nice if this
3500   // could be achieved with InstAlias in the tables.
3501   if (Name.startswith("stos") &&
3502       (Operands.size() == 1 || Operands.size() == 2) &&
3503       (Name == "stos" || Name == "stosb" || Name == "stosw" ||
3504        Name == "stosl" || Name == "stosd" || Name == "stosq")) {
3505     TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
3506     HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3507   }
3508 
3509   // Transform "scas[bwlq]" into "scas[bwlq] ($DIREG)" for appropriate
3510   // values of $DIREG according to the mode. It would be nice if this
3511   // could be achieved with InstAlias in the tables.
3512   if (Name.startswith("scas") &&
3513       (Operands.size() == 1 || Operands.size() == 2) &&
3514       (Name == "scas" || Name == "scasb" || Name == "scasw" ||
3515        Name == "scasl" || Name == "scasd" || Name == "scasq")) {
3516     TmpOperands.push_back(DefaultMemDIOperand(NameLoc));
3517     HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3518   }
3519 
3520   // Add default SI and DI operands to "cmps[bwlq]".
3521   if (Name.startswith("cmps") &&
3522       (Operands.size() == 1 || Operands.size() == 3) &&
3523       (Name == "cmps" || Name == "cmpsb" || Name == "cmpsw" ||
3524        Name == "cmpsl" || Name == "cmpsd" || Name == "cmpsq")) {
3525     AddDefaultSrcDestOperands(TmpOperands, DefaultMemDIOperand(NameLoc),
3526                               DefaultMemSIOperand(NameLoc));
3527     HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3528   }
3529 
3530   // Add default SI and DI operands to "movs[bwlq]".
3531   if (((Name.startswith("movs") &&
3532         (Name == "movs" || Name == "movsb" || Name == "movsw" ||
3533          Name == "movsl" || Name == "movsd" || Name == "movsq")) ||
3534        (Name.startswith("smov") &&
3535         (Name == "smov" || Name == "smovb" || Name == "smovw" ||
3536          Name == "smovl" || Name == "smovd" || Name == "smovq"))) &&
3537       (Operands.size() == 1 || Operands.size() == 3)) {
3538     if (Name == "movsd" && Operands.size() == 1 && !isParsingIntelSyntax())
3539       Operands.back() = X86Operand::CreateToken("movsl", NameLoc);
3540     AddDefaultSrcDestOperands(TmpOperands, DefaultMemSIOperand(NameLoc),
3541                               DefaultMemDIOperand(NameLoc));
3542     HadVerifyError = VerifyAndAdjustOperands(Operands, TmpOperands);
3543   }
3544 
  // Check if we encountered an error for one of the string instructions
3546   if (HadVerifyError) {
3547     return HadVerifyError;
3548   }
3549 
3550   // Transforms "xlat mem8" into "xlatb"
3551   if ((Name == "xlat" || Name == "xlatb") && Operands.size() == 2) {
3552     X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
3553     if (Op1.isMem8()) {
3554       Warning(Op1.getStartLoc(), "memory operand is only for determining the "
3555                                  "size, (R|E)BX will be used for the location");
3556       Operands.pop_back();
3557       static_cast<X86Operand &>(*Operands[0]).setTokenValue("xlatb");
3558     }
3559   }
3560 
3561   if (Flags)
3562     Operands.push_back(X86Operand::CreatePrefix(Flags, NameLoc, NameLoc));
3563   return false;
3564 }
3565 
/// Apply target-specific post-match rewrites to \p Inst, typically to select
/// a shorter or explicitly-requested encoding for the instruction that was
/// just matched. Returns true if \p Inst was modified, false otherwise.
/// \p Ops is currently unused by these transformations.
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
  const MCRegisterInfo *MRI = getContext().getRegisterInfo();

  switch (Inst.getOpcode()) {
  default: return false;
  case X86::JMP_1:
    // {disp32} forces a larger displacement as if the instruction was relaxed.
    // NOTE: 16-bit mode uses 16-bit displacement even though it says {disp32}.
    // This matches GNU assembler.
    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JMP_2 : X86::JMP_4);
      return true;
    }

    return false;
  case X86::JCC_1:
    // {disp32} forces a larger displacement as if the instruction was relaxed.
    // NOTE: 16-bit mode uses 16-bit displacement even though it says {disp32}.
    // This matches GNU assembler.
    if (ForcedDispEncoding == DispEncoding_Disp32) {
      Inst.setOpcode(is16BitMode() ? X86::JCC_2 : X86::JCC_4);
      return true;
    }

    return false;
  // VEX-encoded reg-reg moves that also have a "_REV" form with the operand
  // roles swapped in the encoding.
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    // We can get a smaller encoding by using VEX.R instead of VEX.B if one of
    // the registers is extended, but other isn't.
    // Bail out if a 3-byte VEX was explicitly forced, or if the swap would not
    // help (dest already extended, or source not extended).
    if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
        MRI->getEncodingValue(Inst.getOperand(0).getReg()) >= 8 ||
        MRI->getEncodingValue(Inst.getOperand(1).getReg()) < 8)
      return false;

    unsigned NewOpc;
    switch (Inst.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr;   break;
    case X86::VMOVAPDrr:        NewOpc = X86::VMOVAPDrr_REV;  break;
    case X86::VMOVAPDYrr:       NewOpc = X86::VMOVAPDYrr_REV; break;
    case X86::VMOVAPSrr:        NewOpc = X86::VMOVAPSrr_REV;  break;
    case X86::VMOVAPSYrr:       NewOpc = X86::VMOVAPSYrr_REV; break;
    case X86::VMOVDQArr:        NewOpc = X86::VMOVDQArr_REV;  break;
    case X86::VMOVDQAYrr:       NewOpc = X86::VMOVDQAYrr_REV; break;
    case X86::VMOVDQUrr:        NewOpc = X86::VMOVDQUrr_REV;  break;
    case X86::VMOVDQUYrr:       NewOpc = X86::VMOVDQUYrr_REV; break;
    case X86::VMOVUPDrr:        NewOpc = X86::VMOVUPDrr_REV;  break;
    case X86::VMOVUPDYrr:       NewOpc = X86::VMOVUPDYrr_REV; break;
    case X86::VMOVUPSrr:        NewOpc = X86::VMOVUPSrr_REV;  break;
    case X86::VMOVUPSYrr:       NewOpc = X86::VMOVUPSYrr_REV; break;
    }
    Inst.setOpcode(NewOpc);
    return true;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    // We can get a smaller encoding by using VEX.R instead of VEX.B if one of
    // the registers is extended, but other isn't.
    // These are three-register forms; the relevant source is operand 2.
    if (ForcedVEXEncoding == VEXEncoding_VEX3 ||
        MRI->getEncodingValue(Inst.getOperand(0).getReg()) >= 8 ||
        MRI->getEncodingValue(Inst.getOperand(2).getReg()) < 8)
      return false;

    unsigned NewOpc;
    switch (Inst.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
    case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
    }
    Inst.setOpcode(NewOpc);
    return true;
  }
  case X86::RCR8ri: case X86::RCR16ri: case X86::RCR32ri: case X86::RCR64ri:
  case X86::RCL8ri: case X86::RCL16ri: case X86::RCL32ri: case X86::RCL64ri:
  case X86::ROR8ri: case X86::ROR16ri: case X86::ROR32ri: case X86::ROR64ri:
  case X86::ROL8ri: case X86::ROL16ri: case X86::ROL32ri: case X86::ROL64ri:
  case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri: case X86::SAR64ri:
  case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri: case X86::SHR64ri:
  case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri: case X86::SHL64ri: {
    // Optimize s{hr,ar,hl} $1, <op> to "shift <op>". Similar for rotate.
    // FIXME: It would be great if we could just do this with an InstAlias.
    if (!Inst.getOperand(2).isImm() || Inst.getOperand(2).getImm() != 1)
      return false;

    unsigned NewOpc;
    switch (Inst.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::RCR8ri:  NewOpc = X86::RCR8r1;  break;
    case X86::RCR16ri: NewOpc = X86::RCR16r1; break;
    case X86::RCR32ri: NewOpc = X86::RCR32r1; break;
    case X86::RCR64ri: NewOpc = X86::RCR64r1; break;
    case X86::RCL8ri:  NewOpc = X86::RCL8r1;  break;
    case X86::RCL16ri: NewOpc = X86::RCL16r1; break;
    case X86::RCL32ri: NewOpc = X86::RCL32r1; break;
    case X86::RCL64ri: NewOpc = X86::RCL64r1; break;
    case X86::ROR8ri:  NewOpc = X86::ROR8r1;  break;
    case X86::ROR16ri: NewOpc = X86::ROR16r1; break;
    case X86::ROR32ri: NewOpc = X86::ROR32r1; break;
    case X86::ROR64ri: NewOpc = X86::ROR64r1; break;
    case X86::ROL8ri:  NewOpc = X86::ROL8r1;  break;
    case X86::ROL16ri: NewOpc = X86::ROL16r1; break;
    case X86::ROL32ri: NewOpc = X86::ROL32r1; break;
    case X86::ROL64ri: NewOpc = X86::ROL64r1; break;
    case X86::SAR8ri:  NewOpc = X86::SAR8r1;  break;
    case X86::SAR16ri: NewOpc = X86::SAR16r1; break;
    case X86::SAR32ri: NewOpc = X86::SAR32r1; break;
    case X86::SAR64ri: NewOpc = X86::SAR64r1; break;
    case X86::SHR8ri:  NewOpc = X86::SHR8r1;  break;
    case X86::SHR16ri: NewOpc = X86::SHR16r1; break;
    case X86::SHR32ri: NewOpc = X86::SHR32r1; break;
    case X86::SHR64ri: NewOpc = X86::SHR64r1; break;
    case X86::SHL8ri:  NewOpc = X86::SHL8r1;  break;
    case X86::SHL16ri: NewOpc = X86::SHL16r1; break;
    case X86::SHL32ri: NewOpc = X86::SHL32r1; break;
    case X86::SHL64ri: NewOpc = X86::SHL64r1; break;
    }

    // Rebuild without the immediate operand: the "...r1" forms encode the
    // shift count of 1 implicitly.
    MCInst TmpInst;
    TmpInst.setOpcode(NewOpc);
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(1));
    Inst = TmpInst;
    return true;
  }
  case X86::RCR8mi: case X86::RCR16mi: case X86::RCR32mi: case X86::RCR64mi:
  case X86::RCL8mi: case X86::RCL16mi: case X86::RCL32mi: case X86::RCL64mi:
  case X86::ROR8mi: case X86::ROR16mi: case X86::ROR32mi: case X86::ROR64mi:
  case X86::ROL8mi: case X86::ROL16mi: case X86::ROL32mi: case X86::ROL64mi:
  case X86::SAR8mi: case X86::SAR16mi: case X86::SAR32mi: case X86::SAR64mi:
  case X86::SHR8mi: case X86::SHR16mi: case X86::SHR32mi: case X86::SHR64mi:
  case X86::SHL8mi: case X86::SHL16mi: case X86::SHL32mi: case X86::SHL64mi: {
    // Optimize s{hr,ar,hl} $1, <op> to "shift <op>". Similar for rotate.
    // FIXME: It would be great if we could just do this with an InstAlias.
    // Memory forms: the immediate follows the X86::AddrNumOperands-operand
    // memory reference.
    if (!Inst.getOperand(X86::AddrNumOperands).isImm() ||
        Inst.getOperand(X86::AddrNumOperands).getImm() != 1)
      return false;

    unsigned NewOpc;
    switch (Inst.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::RCR8mi:  NewOpc = X86::RCR8m1;  break;
    case X86::RCR16mi: NewOpc = X86::RCR16m1; break;
    case X86::RCR32mi: NewOpc = X86::RCR32m1; break;
    case X86::RCR64mi: NewOpc = X86::RCR64m1; break;
    case X86::RCL8mi:  NewOpc = X86::RCL8m1;  break;
    case X86::RCL16mi: NewOpc = X86::RCL16m1; break;
    case X86::RCL32mi: NewOpc = X86::RCL32m1; break;
    case X86::RCL64mi: NewOpc = X86::RCL64m1; break;
    case X86::ROR8mi:  NewOpc = X86::ROR8m1;  break;
    case X86::ROR16mi: NewOpc = X86::ROR16m1; break;
    case X86::ROR32mi: NewOpc = X86::ROR32m1; break;
    case X86::ROR64mi: NewOpc = X86::ROR64m1; break;
    case X86::ROL8mi:  NewOpc = X86::ROL8m1;  break;
    case X86::ROL16mi: NewOpc = X86::ROL16m1; break;
    case X86::ROL32mi: NewOpc = X86::ROL32m1; break;
    case X86::ROL64mi: NewOpc = X86::ROL64m1; break;
    case X86::SAR8mi:  NewOpc = X86::SAR8m1;  break;
    case X86::SAR16mi: NewOpc = X86::SAR16m1; break;
    case X86::SAR32mi: NewOpc = X86::SAR32m1; break;
    case X86::SAR64mi: NewOpc = X86::SAR64m1; break;
    case X86::SHR8mi:  NewOpc = X86::SHR8m1;  break;
    case X86::SHR16mi: NewOpc = X86::SHR16m1; break;
    case X86::SHR32mi: NewOpc = X86::SHR32m1; break;
    case X86::SHR64mi: NewOpc = X86::SHR64m1; break;
    case X86::SHL8mi:  NewOpc = X86::SHL8m1;  break;
    case X86::SHL16mi: NewOpc = X86::SHL16m1; break;
    case X86::SHL32mi: NewOpc = X86::SHL32m1; break;
    case X86::SHL64mi: NewOpc = X86::SHL64m1; break;
    }

    // Keep only the memory operands; the count of 1 is implicit in "...m1".
    MCInst TmpInst;
    TmpInst.setOpcode(NewOpc);
    for (int i = 0; i != X86::AddrNumOperands; ++i)
      TmpInst.addOperand(Inst.getOperand(i));
    Inst = TmpInst;
    return true;
  }
  case X86::INT: {
    // Transforms "int $3" into "int3" as a size optimization.  We can't write an
    // instalias with an immediate operand yet.
    if (!Inst.getOperand(0).isImm() || Inst.getOperand(0).getImm() != 3)
      return false;

    MCInst TmpInst;
    TmpInst.setOpcode(X86::INT3);
    Inst = TmpInst;
    return true;
  }
  }
}
3767 
/// Perform semantic checks on a matched instruction and emit diagnostics for
/// register-usage constraints that the matcher itself cannot express.
/// Returns false when \p Inst passes all checks; otherwise propagates the
/// result of the emitted Error/Warning call.
bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
  const MCRegisterInfo *MRI = getContext().getRegisterInfo();

  switch (Inst.getOpcode()) {
  // VEX-encoded gathers: warn when the mask, index, or destination vector
  // registers overlap.
  case X86::VGATHERDPDYrm:
  case X86::VGATHERDPDrm:
  case X86::VGATHERDPSYrm:
  case X86::VGATHERDPSrm:
  case X86::VGATHERQPDYrm:
  case X86::VGATHERQPDrm:
  case X86::VGATHERQPSYrm:
  case X86::VGATHERQPSrm:
  case X86::VPGATHERDDYrm:
  case X86::VPGATHERDDrm:
  case X86::VPGATHERDQYrm:
  case X86::VPGATHERDQrm:
  case X86::VPGATHERQDYrm:
  case X86::VPGATHERQDrm:
  case X86::VPGATHERQQYrm:
  case X86::VPGATHERQQrm: {
    unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    unsigned Mask = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    unsigned Index =
      MRI->getEncodingValue(Inst.getOperand(3 + X86::AddrIndexReg).getReg());
    if (Dest == Mask || Dest == Index || Mask == Index)
      return Warning(Ops[0]->getStartLoc(), "mask, index, and destination "
                                            "registers should be distinct");
    break;
  }
  // EVEX-encoded gathers: only the destination and index vector registers are
  // compared (the mask operand is not checked here — presumably because it is
  // a k-register, which cannot clash with a vector register). Note the memory
  // reference starts at operand 4 here, vs. 3 for the VEX forms above.
  case X86::VGATHERDPDZ128rm:
  case X86::VGATHERDPDZ256rm:
  case X86::VGATHERDPDZrm:
  case X86::VGATHERDPSZ128rm:
  case X86::VGATHERDPSZ256rm:
  case X86::VGATHERDPSZrm:
  case X86::VGATHERQPDZ128rm:
  case X86::VGATHERQPDZ256rm:
  case X86::VGATHERQPDZrm:
  case X86::VGATHERQPSZ128rm:
  case X86::VGATHERQPSZ256rm:
  case X86::VGATHERQPSZrm:
  case X86::VPGATHERDDZ128rm:
  case X86::VPGATHERDDZ256rm:
  case X86::VPGATHERDDZrm:
  case X86::VPGATHERDQZ128rm:
  case X86::VPGATHERDQZ256rm:
  case X86::VPGATHERDQZrm:
  case X86::VPGATHERQDZ128rm:
  case X86::VPGATHERQDZ256rm:
  case X86::VPGATHERQDZrm:
  case X86::VPGATHERQQZ128rm:
  case X86::VPGATHERQQZ256rm:
  case X86::VPGATHERQQZrm: {
    unsigned Dest = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    unsigned Index =
      MRI->getEncodingValue(Inst.getOperand(4 + X86::AddrIndexReg).getReg());
    if (Dest == Index)
      return Warning(Ops[0]->getStartLoc(), "index and destination registers "
                                            "should be distinct");
    break;
  }
  // 4-register-group instructions: the encoded source register implicitly
  // names a block of four consecutive registers starting at a multiple of 4;
  // warn when the written register is not the first of its group.
  case X86::V4FMADDPSrm:
  case X86::V4FMADDPSrmk:
  case X86::V4FMADDPSrmkz:
  case X86::V4FMADDSSrm:
  case X86::V4FMADDSSrmk:
  case X86::V4FMADDSSrmkz:
  case X86::V4FNMADDPSrm:
  case X86::V4FNMADDPSrmk:
  case X86::V4FNMADDPSrmkz:
  case X86::V4FNMADDSSrm:
  case X86::V4FNMADDSSrmk:
  case X86::V4FNMADDSSrmkz:
  case X86::VP4DPWSSDSrm:
  case X86::VP4DPWSSDSrmk:
  case X86::VP4DPWSSDSrmkz:
  case X86::VP4DPWSSDrm:
  case X86::VP4DPWSSDrmk:
  case X86::VP4DPWSSDrmkz: {
    // The register source immediately precedes the memory reference operands.
    unsigned Src2 = Inst.getOperand(Inst.getNumOperands() -
                                    X86::AddrNumOperands - 1).getReg();
    unsigned Src2Enc = MRI->getEncodingValue(Src2);
    if (Src2Enc % 4 != 0) {
      StringRef RegName = X86IntelInstPrinter::getRegisterName(Src2);
      unsigned GroupStart = (Src2Enc / 4) * 4;
      unsigned GroupEnd = GroupStart + 3;
      return Warning(Ops[0]->getStartLoc(),
                     "source register '" + RegName + "' implicitly denotes '" +
                     RegName.take_front(3) + Twine(GroupStart) + "' to '" +
                     RegName.take_front(3) + Twine(GroupEnd) +
                     "' source group");
    }
    break;
  }
  }

  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
  // Check that we aren't mixing AH/BH/CH/DH with REX prefix. We only need to
  // check this with the legacy encoding, VEX/EVEX/XOP don't use REX.
  if ((MCID.TSFlags & X86II::EncodingMask) == 0) {
    MCPhysReg HReg = X86::NoRegister;
    // REX is forced either by the opcode (REX.W) or by any operand register
    // that is only encodable with a REX prefix.
    bool UsesRex = MCID.TSFlags & X86II::REX_W;
    unsigned NumOps = Inst.getNumOperands();
    for (unsigned i = 0; i != NumOps; ++i) {
      const MCOperand &MO = Inst.getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
        HReg = Reg;
      if (X86II::isX86_64NonExtLowByteReg(Reg) ||
          X86II::isX86_64ExtendedReg(Reg))
        UsesRex = true;
    }

    if (UsesRex && HReg != X86::NoRegister) {
      StringRef RegName = X86IntelInstPrinter::getRegisterName(HReg);
      return Error(Ops[0]->getStartLoc(),
                   "can't encode '" + RegName + "' in an instruction requiring "
                   "REX prefix");
    }
  }

  return false;
}
3893 
3894 static const char *getSubtargetFeatureName(uint64_t Val);
3895 
3896 void X86AsmParser::emitWarningForSpecialLVIInstruction(SMLoc Loc) {
3897   Warning(Loc, "Instruction may be vulnerable to LVI and "
3898                "requires manual mitigation");
3899   Note(SMLoc(), "See https://software.intel.com/"
3900                 "security-software-guidance/insights/"
3901                 "deep-dive-load-value-injection#specialinstructions"
3902                 " for more information");
3903 }
3904 
/// RET instructions and also instructions that indirect calls/jumps from memory
/// combine a load and a branch within a single instruction. To mitigate these
/// instructions against LVI, they must be decomposed into separate load and
/// branch instructions, with an LFENCE in between. For more details, see:
/// - X86LoadValueInjectionRetHardening.cpp
/// - X86LoadValueInjectionIndirectThunks.cpp
/// - https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
///
/// This function either emits a mitigation sequence or a warning; it does not
/// report back to the caller whether anything was done.
void X86AsmParser::applyLVICFIMitigation(MCInst &Inst, MCStreamer &Out) {
  // Information on control-flow instructions that require manual mitigation can
  // be found here:
  // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
  switch (Inst.getOpcode()) {
  case X86::RETW:
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::RETIW: {
    // For returns, emit "shl $0, (<stack pointer>)" followed by an LFENCE:
    // a benign read-modify-write of the return-address slot with a fence to
    // stop mis-speculation before the return consumes it.
    MCInst ShlInst, FenceInst;
    bool Parse32 = is32BitMode() || Code16GCC;
    // Pick the stack-pointer register matching the current parsing mode.
    unsigned Basereg =
        is64BitMode() ? X86::RSP : (Parse32 ? X86::ESP : X86::SP);
    const MCExpr *Disp = MCConstantExpr::create(0, getContext());
    auto ShlMemOp = X86Operand::CreateMem(getPointerWidth(), /*SegReg=*/0, Disp,
                                          /*BaseReg=*/Basereg, /*IndexReg=*/0,
                                          /*Scale=*/1, SMLoc{}, SMLoc{}, 0);
    // NOTE(review): the opcode is unconditionally SHL64mi even though the base
    // register above is mode-dependent — confirm this is intended.
    ShlInst.setOpcode(X86::SHL64mi);
    ShlMemOp->addMemOperands(ShlInst, 5);
    ShlInst.addOperand(MCOperand::createImm(0));
    FenceInst.setOpcode(X86::LFENCE);
    Out.emitInstruction(ShlInst, getSTI());
    Out.emitInstruction(FenceInst, getSTI());
    return;
  }
  // Memory-indirect jumps and calls cannot be decomposed here; warn that
  // manual mitigation is required.
  case X86::JMP16m:
  case X86::JMP32m:
  case X86::JMP64m:
  case X86::CALL16m:
  case X86::CALL32m:
  case X86::CALL64m:
    emitWarningForSpecialLVIInstruction(Inst.getLoc());
    return;
  }
}
3951 
/// To mitigate LVI, every instruction that performs a load can be followed by
/// an LFENCE instruction to squash any potential mis-speculation. There are
/// some instructions that require additional considerations, and may require
/// manual mitigation. For more details, see:
/// https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection
///
/// Emits an LFENCE after \p Inst when the instruction may load and can be
/// fenced automatically; for the special-cased REP string forms and bare
/// REP/REPNE prefixes only a warning is emitted.
void X86AsmParser::applyLVILoadHardeningMitigation(MCInst &Inst,
                                                   MCStreamer &Out) {
  auto Opcode = Inst.getOpcode();
  auto Flags = Inst.getFlags();
  if ((Flags & X86::IP_HAS_REPEAT) || (Flags & X86::IP_HAS_REPEAT_NE)) {
    // Information on REP string instructions that require manual mitigation can
    // be found here:
    // https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions
    switch (Opcode) {
    case X86::CMPSB:
    case X86::CMPSW:
    case X86::CMPSL:
    case X86::CMPSQ:
    case X86::SCASB:
    case X86::SCASW:
    case X86::SCASL:
    case X86::SCASQ:
      emitWarningForSpecialLVIInstruction(Inst.getLoc());
      return;
    }
  } else if (Opcode == X86::REP_PREFIX || Opcode == X86::REPNE_PREFIX) {
    // If a REP instruction is found on its own line, it may or may not be
    // followed by a vulnerable instruction. Emit a warning just in case.
    emitWarningForSpecialLVIInstruction(Inst.getLoc());
    return;
  }

  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // Can't mitigate after terminators or calls. A control flow change may have
  // already occurred.
  if (MCID.isTerminator() || MCID.isCall())
    return;

  // LFENCE has the mayLoad property, don't double fence.
  if (MCID.mayLoad() && Inst.getOpcode() != X86::LFENCE) {
    MCInst FenceInst;
    FenceInst.setOpcode(X86::LFENCE);
    Out.emitInstruction(FenceInst, getSTI());
  }
}
4000 
4001 void X86AsmParser::emitInstruction(MCInst &Inst, OperandVector &Operands,
4002                                    MCStreamer &Out) {
4003   if (LVIInlineAsmHardening &&
4004       getSTI().getFeatureBits()[X86::FeatureLVIControlFlowIntegrity])
4005     applyLVICFIMitigation(Inst, Out);
4006 
4007   Out.emitInstruction(Inst, getSTI());
4008 
4009   if (LVIInlineAsmHardening &&
4010       getSTI().getFeatureBits()[X86::FeatureLVILoadHardening])
4011     applyLVILoadHardeningMitigation(Inst, Out);
4012 }
4013 
4014 bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4015                                            OperandVector &Operands,
4016                                            MCStreamer &Out, uint64_t &ErrorInfo,
4017                                            bool MatchingInlineAsm) {
4018   if (isParsingIntelSyntax())
4019     return MatchAndEmitIntelInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
4020                                         MatchingInlineAsm);
4021   return MatchAndEmitATTInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
4022                                     MatchingInlineAsm);
4023 }
4024 
4025 void X86AsmParser::MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op,
4026                                      OperandVector &Operands, MCStreamer &Out,
4027                                      bool MatchingInlineAsm) {
4028   // FIXME: This should be replaced with a real .td file alias mechanism.
4029   // Also, MatchInstructionImpl should actually *do* the EmitInstruction
4030   // call.
4031   const char *Repl = StringSwitch<const char *>(Op.getToken())
4032                          .Case("finit", "fninit")
4033                          .Case("fsave", "fnsave")
4034                          .Case("fstcw", "fnstcw")
4035                          .Case("fstcww", "fnstcw")
4036                          .Case("fstenv", "fnstenv")
4037                          .Case("fstsw", "fnstsw")
4038                          .Case("fstsww", "fnstsw")
4039                          .Case("fclex", "fnclex")
4040                          .Default(nullptr);
4041   if (Repl) {
4042     MCInst Inst;
4043     Inst.setOpcode(X86::WAIT);
4044     Inst.setLoc(IDLoc);
4045     if (!MatchingInlineAsm)
4046       emitInstruction(Inst, Operands, Out);
4047     Operands[0] = X86Operand::CreateToken(Repl, IDLoc);
4048   }
4049 }
4050 
4051 bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc,
4052                                        const FeatureBitset &MissingFeatures,
4053                                        bool MatchingInlineAsm) {
4054   assert(MissingFeatures.any() && "Unknown missing feature!");
4055   SmallString<126> Msg;
4056   raw_svector_ostream OS(Msg);
4057   OS << "instruction requires:";
4058   for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
4059     if (MissingFeatures[i])
4060       OS << ' ' << getSubtargetFeatureName(i);
4061   }
4062   return Error(IDLoc, OS.str(), SMRange(), MatchingInlineAsm);
4063 }
4064 
4065 static unsigned getPrefixes(OperandVector &Operands) {
4066   unsigned Result = 0;
4067   X86Operand &Prefix = static_cast<X86Operand &>(*Operands.back());
4068   if (Prefix.isPrefix()) {
4069     Result = Prefix.getPrefix();
4070     Operands.pop_back();
4071   }
4072   return Result;
4073 }
4074 
4075 unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
4076   unsigned Opc = Inst.getOpcode();
4077   const MCInstrDesc &MCID = MII.get(Opc);
4078 
4079   if (ForcedVEXEncoding == VEXEncoding_EVEX &&
4080       (MCID.TSFlags & X86II::EncodingMask) != X86II::EVEX)
4081     return Match_Unsupported;
4082 
4083   if ((ForcedVEXEncoding == VEXEncoding_VEX ||
4084        ForcedVEXEncoding == VEXEncoding_VEX2 ||
4085        ForcedVEXEncoding == VEXEncoding_VEX3) &&
4086       (MCID.TSFlags & X86II::EncodingMask) != X86II::VEX)
4087     return Match_Unsupported;
4088 
4089   // These instructions are only available with {vex}, {vex2} or {vex3} prefix
4090   if (MCID.TSFlags & X86II::ExplicitVEXPrefix &&
4091       (ForcedVEXEncoding != VEXEncoding_VEX &&
4092        ForcedVEXEncoding != VEXEncoding_VEX2 &&
4093        ForcedVEXEncoding != VEXEncoding_VEX3))
4094     return Match_Unsupported;
4095 
4096   // These instructions match ambiguously with their VEX encoded counterparts
4097   // and appear first in the matching table. Reject them unless we're forcing
4098   // EVEX encoding.
4099   // FIXME: We really need a way to break the ambiguity.
4100   switch (Opc) {
4101   case X86::VCVTSD2SIZrm_Int:
4102   case X86::VCVTSD2SI64Zrm_Int:
4103   case X86::VCVTSS2SIZrm_Int:
4104   case X86::VCVTSS2SI64Zrm_Int:
4105   case X86::VCVTTSD2SIZrm:   case X86::VCVTTSD2SIZrm_Int:
4106   case X86::VCVTTSD2SI64Zrm: case X86::VCVTTSD2SI64Zrm_Int:
4107   case X86::VCVTTSS2SIZrm:   case X86::VCVTTSS2SIZrm_Int:
4108   case X86::VCVTTSS2SI64Zrm: case X86::VCVTTSS2SI64Zrm_Int:
4109     if (ForcedVEXEncoding != VEXEncoding_EVEX)
4110       return Match_Unsupported;
4111     break;
4112   }
4113 
4114   return Match_Success;
4115 }
4116 
// AT&T-syntax matcher. First tries the mnemonic exactly as written; on
// failure, retries with each size suffix appended (b/w/l/q for integer
// instructions, s/l/t for x87 FP) and uses the set of results to either emit
// the instruction or produce a precise diagnostic. Returns true on error.
bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
  assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
  SMRange EmptyRange = None;

  // First, handle aliases that expand to multiple instructions.
  MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands,
                    Out, MatchingInlineAsm);
  // Re-fetch operand 0: MatchFPUWaitAlias may have replaced the token.
  X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
  unsigned Prefixes = getPrefixes(Operands);

  MCInst Inst;

  // If VEX/EVEX encoding is forced, we need to pass the USE_* flag to the
  // encoder and printer.
  if (ForcedVEXEncoding == VEXEncoding_VEX)
    Prefixes |= X86::IP_USE_VEX;
  else if (ForcedVEXEncoding == VEXEncoding_VEX2)
    Prefixes |= X86::IP_USE_VEX2;
  else if (ForcedVEXEncoding == VEXEncoding_VEX3)
    Prefixes |= X86::IP_USE_VEX3;
  else if (ForcedVEXEncoding == VEXEncoding_EVEX)
    Prefixes |= X86::IP_USE_EVEX;

  // Set encoded flags for {disp8} and {disp32}.
  if (ForcedDispEncoding == DispEncoding_Disp8)
    Prefixes |= X86::IP_USE_DISP8;
  else if (ForcedDispEncoding == DispEncoding_Disp32)
    Prefixes |= X86::IP_USE_DISP32;

  if (Prefixes)
    Inst.setFlags(Prefixes);

  // In 16-bit mode, if data32 is specified, temporarily switch to 32-bit mode
  // when matching the instruction.
  if (ForcedDataPrefix == X86::Mode32Bit)
    SwitchMode(X86::Mode32Bit);
  // First, try a direct match.
  FeatureBitset MissingFeatures;
  unsigned OriginalError = MatchInstruction(Operands, Inst, ErrorInfo,
                                            MissingFeatures, MatchingInlineAsm,
                                            isParsingIntelSyntax());
  if (ForcedDataPrefix == X86::Mode32Bit) {
    SwitchMode(X86::Mode16Bit);
    ForcedDataPrefix = 0;
  }
  switch (OriginalError) {
  default: llvm_unreachable("Unexpected match result!");
  case Match_Success:
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
      return true;
    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other.
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
        ;

    Inst.setLoc(IDLoc);
    if (!MatchingInlineAsm)
      emitInstruction(Inst, Operands, Out);
    Opcode = Inst.getOpcode();
    return false;
  case Match_InvalidImmUnsignedi4: {
    SMLoc ErrorLoc = ((X86Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
                 EmptyRange, MatchingInlineAsm);
  }
  case Match_MissingFeature:
    return ErrorMissingFeature(IDLoc, MissingFeatures, MatchingInlineAsm);
  case Match_InvalidOperand:
  case Match_MnemonicFail:
  case Match_Unsupported:
    // Fall through to the suffix-retry path below.
    break;
  }
  if (Op.getToken().empty()) {
    Error(IDLoc, "instruction must have size higher than 0", EmptyRange,
          MatchingInlineAsm);
    return true;
  }

  // FIXME: Ideally, we would only attempt suffix matches for things which are
  // valid prefixes, and we could just infer the right unambiguous
  // type. However, that requires substantially more matcher support than the
  // following hack.

  // Change the operand to point to a temporary token.
  StringRef Base = Op.getToken();
  SmallString<16> Tmp;
  Tmp += Base;
  // Trailing placeholder character; overwritten with each candidate suffix
  // in the loop below.
  Tmp += ' ';
  Op.setTokenValue(Tmp);

  // If this instruction starts with an 'f', then it is a floating point stack
  // instruction.  These come in up to three forms for 32-bit, 64-bit, and
  // 80-bit floating point, which use the suffixes s,l,t respectively.
  //
  // Otherwise, we assume that this may be an integer instruction, which comes
  // in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
  const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
  // MemSize corresponding to Suffixes.  { 8, 16, 32, 64 }    { 32, 64, 80, 0 }
  const char *MemSize = Base[0] != 'f' ? "\x08\x10\x20\x40" : "\x20\x40\x50\0";

  // Check for the various suffix matches.
  uint64_t ErrorInfoIgnore;
  FeatureBitset ErrorInfoMissingFeatures; // Init suppresses compiler warnings.
  unsigned Match[4];

  // Some instruction like VPMULDQ is NOT the variant of VPMULD but a new one.
  // So we should make sure the suffix matcher only works for memory variant
  // that has the same size with the suffix.
  // FIXME: This flag is a workaround for legacy instructions that didn't
  // declare non suffix variant assembly.
  bool HasVectorReg = false;
  X86Operand *MemOp = nullptr;
  for (const auto &Op : Operands) {
    X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
    if (X86Op->isVectorReg())
      HasVectorReg = true;
    else if (X86Op->isMem()) {
      MemOp = X86Op;
      assert(MemOp->Mem.Size == 0 && "Memory size always 0 under ATT syntax");
      // Have we found an unqualified memory operand,
      // break. IA allows only one memory operand.
      break;
    }
  }

  // Try each candidate suffix in turn, recording the match result for each.
  for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I) {
    Tmp.back() = Suffixes[I];
    if (MemOp && HasVectorReg)
      MemOp->Mem.Size = MemSize[I];
    Match[I] = Match_MnemonicFail;
    if (MemOp || !HasVectorReg) {
      Match[I] =
          MatchInstruction(Operands, Inst, ErrorInfoIgnore, MissingFeatures,
                           MatchingInlineAsm, isParsingIntelSyntax());
      // If this returned as a missing feature failure, remember that.
      if (Match[I] == Match_MissingFeature)
        ErrorInfoMissingFeatures = MissingFeatures;
    }
  }

  // Restore the old token.
  Op.setTokenValue(Base);

  // If exactly one matched, then we treat that as a successful match (and the
  // instruction will already have been filled in correctly, since the failing
  // matches won't have modified it).
  unsigned NumSuccessfulMatches =
      std::count(std::begin(Match), std::end(Match), Match_Success);
  if (NumSuccessfulMatches == 1) {
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
      return true;
    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other.
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
        ;

    Inst.setLoc(IDLoc);
    if (!MatchingInlineAsm)
      emitInstruction(Inst, Operands, Out);
    Opcode = Inst.getOpcode();
    return false;
  }

  // Otherwise, the match failed, try to produce a decent error message.

  // If we had multiple suffix matches, then identify this as an ambiguous
  // match.
  if (NumSuccessfulMatches > 1) {
    char MatchChars[4];
    unsigned NumMatches = 0;
    for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I)
      if (Match[I] == Match_Success)
        MatchChars[NumMatches++] = Suffixes[I];

    // Build "could be 'x', 'y', or 'z'" listing each successful suffix.
    SmallString<126> Msg;
    raw_svector_ostream OS(Msg);
    OS << "ambiguous instructions require an explicit suffix (could be ";
    for (unsigned i = 0; i != NumMatches; ++i) {
      if (i != 0)
        OS << ", ";
      if (i + 1 == NumMatches)
        OS << "or ";
      OS << "'" << Base << MatchChars[i] << "'";
    }
    OS << ")";
    Error(IDLoc, OS.str(), EmptyRange, MatchingInlineAsm);
    return true;
  }

  // Okay, we know that none of the variants matched successfully.

  // If all of the instructions reported an invalid mnemonic, then the original
  // mnemonic was invalid.
  if (std::count(std::begin(Match), std::end(Match), Match_MnemonicFail) == 4) {
    if (OriginalError == Match_MnemonicFail)
      return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'",
                   Op.getLocRange(), MatchingInlineAsm);

    if (OriginalError == Match_Unsupported)
      return Error(IDLoc, "unsupported instruction", EmptyRange,
                   MatchingInlineAsm);

    assert(OriginalError == Match_InvalidOperand && "Unexpected error");
    // Recover location info for the operand if we know which was the problem.
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction", EmptyRange,
                     MatchingInlineAsm);

      X86Operand &Operand = (X86Operand &)*Operands[ErrorInfo];
      if (Operand.getStartLoc().isValid()) {
        SMRange OperandRange = Operand.getLocRange();
        return Error(Operand.getStartLoc(), "invalid operand for instruction",
                     OperandRange, MatchingInlineAsm);
      }
    }

    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  // If one instruction matched as unsupported, report this as unsupported.
  if (std::count(std::begin(Match), std::end(Match),
                 Match_Unsupported) == 1) {
    return Error(IDLoc, "unsupported instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  // If one instruction matched with a missing feature, report this as a
  // missing feature.
  if (std::count(std::begin(Match), std::end(Match),
                 Match_MissingFeature) == 1) {
    ErrorInfo = Match_MissingFeature;
    return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
                               MatchingInlineAsm);
  }

  // If one instruction matched with an invalid operand, report this as an
  // operand failure.
  if (std::count(std::begin(Match), std::end(Match),
                 Match_InvalidOperand) == 1) {
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  // If all of these were an outright failure, report it in a useless way.
  Error(IDLoc, "unknown use of instruction mnemonic without a size suffix",
        EmptyRange, MatchingInlineAsm);
  return true;
}
4378 
// Intel-syntax matcher. Intel mnemonics carry no size suffix, so when an
// unsized memory operand is present the matcher retries with each candidate
// operand size and then derives success/ambiguity/failure diagnostics from
// the collected results. Returns true on error.
bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
                                                OperandVector &Operands,
                                                MCStreamer &Out,
                                                uint64_t &ErrorInfo,
                                                bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
  assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
  StringRef Mnemonic = (static_cast<X86Operand &>(*Operands[0])).getToken();
  SMRange EmptyRange = None;
  StringRef Base = (static_cast<X86Operand &>(*Operands[0])).getToken();
  unsigned Prefixes = getPrefixes(Operands);

  // First, handle aliases that expand to multiple instructions.
  MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands, Out, MatchingInlineAsm);
  // Re-fetch operand 0: MatchFPUWaitAlias may have replaced the token.
  X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);

  MCInst Inst;

  // If VEX/EVEX encoding is forced, we need to pass the USE_* flag to the
  // encoder and printer.
  if (ForcedVEXEncoding == VEXEncoding_VEX)
    Prefixes |= X86::IP_USE_VEX;
  else if (ForcedVEXEncoding == VEXEncoding_VEX2)
    Prefixes |= X86::IP_USE_VEX2;
  else if (ForcedVEXEncoding == VEXEncoding_VEX3)
    Prefixes |= X86::IP_USE_VEX3;
  else if (ForcedVEXEncoding == VEXEncoding_EVEX)
    Prefixes |= X86::IP_USE_EVEX;

  // Set encoded flags for {disp8} and {disp32}.
  if (ForcedDispEncoding == DispEncoding_Disp8)
    Prefixes |= X86::IP_USE_DISP8;
  else if (ForcedDispEncoding == DispEncoding_Disp32)
    Prefixes |= X86::IP_USE_DISP32;

  if (Prefixes)
    Inst.setFlags(Prefixes);

  // Find one unsized memory operand, if present.
  X86Operand *UnsizedMemOp = nullptr;
  for (const auto &Op : Operands) {
    X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
    if (X86Op->isMemUnsized()) {
      UnsizedMemOp = X86Op;
      // Have we found an unqualified memory operand,
      // break. IA allows only one memory operand.
      break;
    }
  }

  // Allow some instructions to have implicitly pointer-sized operands.  This is
  // compatible with gas.
  if (UnsizedMemOp) {
    static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
    for (const char *Instr : PtrSizedInstrs) {
      if (Mnemonic == Instr) {
        UnsizedMemOp->Mem.Size = getPointerWidth();
        break;
      }
    }
  }

  SmallVector<unsigned, 8> Match;
  FeatureBitset ErrorInfoMissingFeatures;
  FeatureBitset MissingFeatures;

  // If unsized push has immediate operand we should default the default pointer
  // size for the size.
  if (Mnemonic == "push" && Operands.size() == 2) {
    auto *X86Op = static_cast<X86Operand *>(Operands[1].get());
    if (X86Op->isImm()) {
      // If it's not a constant fall through and let remainder take care of it.
      const auto *CE = dyn_cast<MCConstantExpr>(X86Op->getImm());
      unsigned Size = getPointerWidth();
      if (CE &&
          (isIntN(Size, CE->getValue()) || isUIntN(Size, CE->getValue()))) {
        // Temporarily append the mode-appropriate AT&T suffix to the mnemonic.
        SmallString<16> Tmp;
        Tmp += Base;
        Tmp += (is64BitMode())
                   ? "q"
                   : (is32BitMode()) ? "l" : (is16BitMode()) ? "w" : " ";
        Op.setTokenValue(Tmp);
        // Do match in ATT mode to allow explicit suffix usage.
        Match.push_back(MatchInstruction(Operands, Inst, ErrorInfo,
                                         MissingFeatures, MatchingInlineAsm,
                                         false /*isParsingIntelSyntax()*/));
        Op.setTokenValue(Base);
      }
    }
  }

  // If an unsized memory operand is present, try to match with each memory
  // operand size.  In Intel assembly, the size is not part of the instruction
  // mnemonic.
  if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
    static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
    for (unsigned Size : MopSizes) {
      UnsizedMemOp->Mem.Size = Size;
      uint64_t ErrorInfoIgnore;
      unsigned LastOpcode = Inst.getOpcode();
      unsigned M = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
                                    MissingFeatures, MatchingInlineAsm,
                                    isParsingIntelSyntax());
      // Record the result only the first time, or when the matched opcode
      // differs from the previous attempt's.
      if (Match.empty() || LastOpcode != Inst.getOpcode())
        Match.push_back(M);

      // If this returned as a missing feature failure, remember that.
      if (Match.back() == Match_MissingFeature)
        ErrorInfoMissingFeatures = MissingFeatures;
    }

    // Restore the size of the unsized memory operand if we modified it.
    UnsizedMemOp->Mem.Size = 0;
  }

  // If we haven't matched anything yet, this is not a basic integer or FPU
  // operation.  There shouldn't be any ambiguity in our mnemonic table, so try
  // matching with the unsized operand.
  if (Match.empty()) {
    Match.push_back(MatchInstruction(
        Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
        isParsingIntelSyntax()));
    // If this returned as a missing feature failure, remember that.
    if (Match.back() == Match_MissingFeature)
      ErrorInfoMissingFeatures = MissingFeatures;
  }

  // Restore the size of the unsized memory operand if we modified it.
  if (UnsizedMemOp)
    UnsizedMemOp->Mem.Size = 0;

  // If it's a bad mnemonic, all results will be the same.
  if (Match.back() == Match_MnemonicFail) {
    return Error(IDLoc, "invalid instruction mnemonic '" + Mnemonic + "'",
                 Op.getLocRange(), MatchingInlineAsm);
  }

  unsigned NumSuccessfulMatches =
      std::count(std::begin(Match), std::end(Match), Match_Success);

  // If matching was ambiguous and we had size information from the frontend,
  // try again with that. This handles cases like "movxz eax, m8/m16".
  if (UnsizedMemOp && NumSuccessfulMatches > 1 &&
      UnsizedMemOp->getMemFrontendSize()) {
    UnsizedMemOp->Mem.Size = UnsizedMemOp->getMemFrontendSize();
    unsigned M = MatchInstruction(
        Operands, Inst, ErrorInfo, MissingFeatures, MatchingInlineAsm,
        isParsingIntelSyntax());
    if (M == Match_Success)
      NumSuccessfulMatches = 1;

    // Add a rewrite that encodes the size information we used from the
    // frontend.
    InstInfo->AsmRewrites->emplace_back(
        AOK_SizeDirective, UnsizedMemOp->getStartLoc(),
        /*Len=*/0, UnsizedMemOp->getMemFrontendSize());
  }

  // If exactly one matched, then we treat that as a successful match (and the
  // instruction will already have been filled in correctly, since the failing
  // matches won't have modified it).
  if (NumSuccessfulMatches == 1) {
    if (!MatchingInlineAsm && validateInstruction(Inst, Operands))
      return true;
    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the individual
    // transformations can chain off each other.
    if (!MatchingInlineAsm)
      while (processInstruction(Inst, Operands))
        ;
    Inst.setLoc(IDLoc);
    if (!MatchingInlineAsm)
      emitInstruction(Inst, Operands, Out);
    Opcode = Inst.getOpcode();
    return false;
  } else if (NumSuccessfulMatches > 1) {
    assert(UnsizedMemOp &&
           "multiple matches only possible with unsized memory operands");
    return Error(UnsizedMemOp->getStartLoc(),
                 "ambiguous operand size for instruction '" + Mnemonic + "\'",
                 UnsizedMemOp->getLocRange());
  }

  // If one instruction matched as unsupported, report this as unsupported.
  if (std::count(std::begin(Match), std::end(Match),
                 Match_Unsupported) == 1) {
    return Error(IDLoc, "unsupported instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  // If one instruction matched with a missing feature, report this as a
  // missing feature.
  if (std::count(std::begin(Match), std::end(Match),
                 Match_MissingFeature) == 1) {
    ErrorInfo = Match_MissingFeature;
    return ErrorMissingFeature(IDLoc, ErrorInfoMissingFeatures,
                               MatchingInlineAsm);
  }

  // If one instruction matched with an invalid operand, report this as an
  // operand failure.
  if (std::count(std::begin(Match), std::end(Match),
                 Match_InvalidOperand) == 1) {
    return Error(IDLoc, "invalid operand for instruction", EmptyRange,
                 MatchingInlineAsm);
  }

  if (std::count(std::begin(Match), std::end(Match),
                 Match_InvalidImmUnsignedi4) == 1) {
    SMLoc ErrorLoc = ((X86Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      ErrorLoc = IDLoc;
    return Error(ErrorLoc, "immediate must be an integer in range [0, 15]",
                 EmptyRange, MatchingInlineAsm);
  }

  // If all of these were an outright failure, report it in a useless way.
  return Error(IDLoc, "unknown instruction mnemonic", EmptyRange,
               MatchingInlineAsm);
}
4599 
4600 bool X86AsmParser::OmitRegisterFromClobberLists(unsigned RegNo) {
4601   return X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo);
4602 }
4603 
// Dispatcher for X86-specific assembler directives. Returns false when the
// directive was recognized and handled (possibly after emitting a
// diagnostic), true to let the generic parser try an unknown directive.
// Note the mix of startswith() and exact comparisons is deliberate: e.g.
// ".code16"/".code32"/".code64" all reach ParseDirectiveCode.
bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
  MCAsmParser &Parser = getParser();
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal.startswith(".arch"))
    return parseDirectiveArch();
  if (IDVal.startswith(".code"))
    return ParseDirectiveCode(IDVal, DirectiveID.getLoc());
  else if (IDVal.startswith(".att_syntax")) {
    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      // Only the "prefix" argument is supported; "noprefix" would require
      // registers without '%', which this parser does not accept in AT&T.
      if (Parser.getTok().getString() == "prefix")
        Parser.Lex();
      else if (Parser.getTok().getString() == "noprefix")
        return Error(DirectiveID.getLoc(), "'.att_syntax noprefix' is not "
                                           "supported: registers must have a "
                                           "'%' prefix in .att_syntax");
    }
    getParser().setAssemblerDialect(0);
    return false;
  } else if (IDVal.startswith(".intel_syntax")) {
    getParser().setAssemblerDialect(1);
    if (getLexer().isNot(AsmToken::EndOfStatement)) {
      // Mirror image of the .att_syntax handling above.
      if (Parser.getTok().getString() == "noprefix")
        Parser.Lex();
      else if (Parser.getTok().getString() == "prefix")
        return Error(DirectiveID.getLoc(), "'.intel_syntax prefix' is not "
                                           "supported: registers must not have "
                                           "a '%' prefix in .intel_syntax");
    }
    return false;
  } else if (IDVal == ".nops")
    return parseDirectiveNops(DirectiveID.getLoc());
  else if (IDVal == ".even")
    return parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_proc")
    return parseDirectiveFPOProc(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_setframe")
    return parseDirectiveFPOSetFrame(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_pushreg")
    return parseDirectiveFPOPushReg(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_stackalloc")
    return parseDirectiveFPOStackAlloc(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_stackalign")
    return parseDirectiveFPOStackAlign(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_endprologue")
    return parseDirectiveFPOEndPrologue(DirectiveID.getLoc());
  else if (IDVal == ".cv_fpo_endproc")
    return parseDirectiveFPOEndProc(DirectiveID.getLoc());
  // SEH directives also accept their MASM spellings when parsing MASM.
  else if (IDVal == ".seh_pushreg" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".pushreg")))
    return parseDirectiveSEHPushReg(DirectiveID.getLoc());
  else if (IDVal == ".seh_setframe" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".setframe")))
    return parseDirectiveSEHSetFrame(DirectiveID.getLoc());
  else if (IDVal == ".seh_savereg" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".savereg")))
    return parseDirectiveSEHSaveReg(DirectiveID.getLoc());
  else if (IDVal == ".seh_savexmm" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".savexmm128")))
    return parseDirectiveSEHSaveXMM(DirectiveID.getLoc());
  else if (IDVal == ".seh_pushframe" ||
           (Parser.isParsingMasm() && IDVal.equals_insensitive(".pushframe")))
    return parseDirectiveSEHPushFrame(DirectiveID.getLoc());

  // Not an X86 directive; let the generic directive handling take over.
  return true;
}
4669 
4670 bool X86AsmParser::parseDirectiveArch() {
4671   // Ignore .arch for now.
4672   getParser().parseStringToEndOfStatement();
4673   return false;
4674 }
4675 
4676 /// parseDirectiveNops
4677 ///  ::= .nops size[, control]
4678 bool X86AsmParser::parseDirectiveNops(SMLoc L) {
4679   int64_t NumBytes = 0, Control = 0;
4680   SMLoc NumBytesLoc, ControlLoc;
4681   const MCSubtargetInfo STI = getSTI();
4682   NumBytesLoc = getTok().getLoc();
4683   if (getParser().checkForValidSection() ||
4684       getParser().parseAbsoluteExpression(NumBytes))
4685     return true;
4686 
4687   if (parseOptionalToken(AsmToken::Comma)) {
4688     ControlLoc = getTok().getLoc();
4689     if (getParser().parseAbsoluteExpression(Control))
4690       return true;
4691   }
4692   if (getParser().parseToken(AsmToken::EndOfStatement,
4693                              "unexpected token in '.nops' directive"))
4694     return true;
4695 
4696   if (NumBytes <= 0) {
4697     Error(NumBytesLoc, "'.nops' directive with non-positive size");
4698     return false;
4699   }
4700 
4701   if (Control < 0) {
4702     Error(ControlLoc, "'.nops' directive with negative NOP size");
4703     return false;
4704   }
4705 
4706   /// Emit nops
4707   getParser().getStreamer().emitNops(NumBytes, Control, L);
4708 
4709   return false;
4710 }
4711 
4712 /// parseDirectiveEven
4713 ///  ::= .even
4714 bool X86AsmParser::parseDirectiveEven(SMLoc L) {
4715   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4716     return false;
4717 
4718   const MCSection *Section = getStreamer().getCurrentSectionOnly();
4719   if (!Section) {
4720     getStreamer().InitSections(false);
4721     Section = getStreamer().getCurrentSectionOnly();
4722   }
4723   if (Section->UseCodeAlign())
4724     getStreamer().emitCodeAlignment(2, 0);
4725   else
4726     getStreamer().emitValueToAlignment(2, 0, 1, 0);
4727   return false;
4728 }
4729 
4730 /// ParseDirectiveCode
4731 ///  ::= .code16 | .code32 | .code64
4732 bool X86AsmParser::ParseDirectiveCode(StringRef IDVal, SMLoc L) {
4733   MCAsmParser &Parser = getParser();
4734   Code16GCC = false;
4735   if (IDVal == ".code16") {
4736     Parser.Lex();
4737     if (!is16BitMode()) {
4738       SwitchMode(X86::Mode16Bit);
4739       getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
4740     }
4741   } else if (IDVal == ".code16gcc") {
4742     // .code16gcc parses as if in 32-bit mode, but emits code in 16-bit mode.
4743     Parser.Lex();
4744     Code16GCC = true;
4745     if (!is16BitMode()) {
4746       SwitchMode(X86::Mode16Bit);
4747       getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
4748     }
4749   } else if (IDVal == ".code32") {
4750     Parser.Lex();
4751     if (!is32BitMode()) {
4752       SwitchMode(X86::Mode32Bit);
4753       getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
4754     }
4755   } else if (IDVal == ".code64") {
4756     Parser.Lex();
4757     if (!is64BitMode()) {
4758       SwitchMode(X86::Mode64Bit);
4759       getParser().getStreamer().emitAssemblerFlag(MCAF_Code64);
4760     }
4761   } else {
4762     Error(L, "unknown directive " + IDVal);
4763     return false;
4764   }
4765 
4766   return false;
4767 }
4768 
4769 // .cv_fpo_proc foo
4770 bool X86AsmParser::parseDirectiveFPOProc(SMLoc L) {
4771   MCAsmParser &Parser = getParser();
4772   StringRef ProcName;
4773   int64_t ParamsSize;
4774   if (Parser.parseIdentifier(ProcName))
4775     return Parser.TokError("expected symbol name");
4776   if (Parser.parseIntToken(ParamsSize, "expected parameter byte count"))
4777     return true;
4778   if (!isUIntN(32, ParamsSize))
4779     return Parser.TokError("parameters size out of range");
4780   if (parseEOL())
4781     return true;
4782   MCSymbol *ProcSym = getContext().getOrCreateSymbol(ProcName);
4783   return getTargetStreamer().emitFPOProc(ProcSym, ParamsSize, L);
4784 }
4785 
4786 // .cv_fpo_setframe ebp
4787 bool X86AsmParser::parseDirectiveFPOSetFrame(SMLoc L) {
4788   unsigned Reg;
4789   SMLoc DummyLoc;
4790   if (ParseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
4791     return true;
4792   return getTargetStreamer().emitFPOSetFrame(Reg, L);
4793 }
4794 
4795 // .cv_fpo_pushreg ebx
4796 bool X86AsmParser::parseDirectiveFPOPushReg(SMLoc L) {
4797   unsigned Reg;
4798   SMLoc DummyLoc;
4799   if (ParseRegister(Reg, DummyLoc, DummyLoc) || parseEOL())
4800     return true;
4801   return getTargetStreamer().emitFPOPushReg(Reg, L);
4802 }
4803 
4804 // .cv_fpo_stackalloc 20
4805 bool X86AsmParser::parseDirectiveFPOStackAlloc(SMLoc L) {
4806   MCAsmParser &Parser = getParser();
4807   int64_t Offset;
4808   if (Parser.parseIntToken(Offset, "expected offset") || parseEOL())
4809     return true;
4810   return getTargetStreamer().emitFPOStackAlloc(Offset, L);
4811 }
4812 
4813 // .cv_fpo_stackalign 8
4814 bool X86AsmParser::parseDirectiveFPOStackAlign(SMLoc L) {
4815   MCAsmParser &Parser = getParser();
4816   int64_t Offset;
4817   if (Parser.parseIntToken(Offset, "expected offset") || parseEOL())
4818     return true;
4819   return getTargetStreamer().emitFPOStackAlign(Offset, L);
4820 }
4821 
4822 // .cv_fpo_endprologue
4823 bool X86AsmParser::parseDirectiveFPOEndPrologue(SMLoc L) {
4824   MCAsmParser &Parser = getParser();
4825   if (Parser.parseEOL())
4826     return true;
4827   return getTargetStreamer().emitFPOEndPrologue(L);
4828 }
4829 
4830 // .cv_fpo_endproc
4831 bool X86AsmParser::parseDirectiveFPOEndProc(SMLoc L) {
4832   MCAsmParser &Parser = getParser();
4833   if (Parser.parseEOL())
4834     return true;
4835   return getTargetStreamer().emitFPOEndProc(L);
4836 }
4837 
4838 bool X86AsmParser::parseSEHRegisterNumber(unsigned RegClassID,
4839                                           unsigned &RegNo) {
4840   SMLoc startLoc = getLexer().getLoc();
4841   const MCRegisterInfo *MRI = getContext().getRegisterInfo();
4842 
4843   // Try parsing the argument as a register first.
4844   if (getLexer().getTok().isNot(AsmToken::Integer)) {
4845     SMLoc endLoc;
4846     if (ParseRegister(RegNo, startLoc, endLoc))
4847       return true;
4848 
4849     if (!X86MCRegisterClasses[RegClassID].contains(RegNo)) {
4850       return Error(startLoc,
4851                    "register is not supported for use with this directive");
4852     }
4853   } else {
4854     // Otherwise, an integer number matching the encoding of the desired
4855     // register may appear.
4856     int64_t EncodedReg;
4857     if (getParser().parseAbsoluteExpression(EncodedReg))
4858       return true;
4859 
4860     // The SEH register number is the same as the encoding register number. Map
4861     // from the encoding back to the LLVM register number.
4862     RegNo = 0;
4863     for (MCPhysReg Reg : X86MCRegisterClasses[RegClassID]) {
4864       if (MRI->getEncodingValue(Reg) == EncodedReg) {
4865         RegNo = Reg;
4866         break;
4867       }
4868     }
4869     if (RegNo == 0) {
4870       return Error(startLoc,
4871                    "incorrect register number for use with this directive");
4872     }
4873   }
4874 
4875   return false;
4876 }
4877 
4878 bool X86AsmParser::parseDirectiveSEHPushReg(SMLoc Loc) {
4879   unsigned Reg = 0;
4880   if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
4881     return true;
4882 
4883   if (getLexer().isNot(AsmToken::EndOfStatement))
4884     return TokError("unexpected token in directive");
4885 
4886   getParser().Lex();
4887   getStreamer().EmitWinCFIPushReg(Reg, Loc);
4888   return false;
4889 }
4890 
4891 bool X86AsmParser::parseDirectiveSEHSetFrame(SMLoc Loc) {
4892   unsigned Reg = 0;
4893   int64_t Off;
4894   if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
4895     return true;
4896   if (getLexer().isNot(AsmToken::Comma))
4897     return TokError("you must specify a stack pointer offset");
4898 
4899   getParser().Lex();
4900   if (getParser().parseAbsoluteExpression(Off))
4901     return true;
4902 
4903   if (getLexer().isNot(AsmToken::EndOfStatement))
4904     return TokError("unexpected token in directive");
4905 
4906   getParser().Lex();
4907   getStreamer().EmitWinCFISetFrame(Reg, Off, Loc);
4908   return false;
4909 }
4910 
4911 bool X86AsmParser::parseDirectiveSEHSaveReg(SMLoc Loc) {
4912   unsigned Reg = 0;
4913   int64_t Off;
4914   if (parseSEHRegisterNumber(X86::GR64RegClassID, Reg))
4915     return true;
4916   if (getLexer().isNot(AsmToken::Comma))
4917     return TokError("you must specify an offset on the stack");
4918 
4919   getParser().Lex();
4920   if (getParser().parseAbsoluteExpression(Off))
4921     return true;
4922 
4923   if (getLexer().isNot(AsmToken::EndOfStatement))
4924     return TokError("unexpected token in directive");
4925 
4926   getParser().Lex();
4927   getStreamer().EmitWinCFISaveReg(Reg, Off, Loc);
4928   return false;
4929 }
4930 
4931 bool X86AsmParser::parseDirectiveSEHSaveXMM(SMLoc Loc) {
4932   unsigned Reg = 0;
4933   int64_t Off;
4934   if (parseSEHRegisterNumber(X86::VR128XRegClassID, Reg))
4935     return true;
4936   if (getLexer().isNot(AsmToken::Comma))
4937     return TokError("you must specify an offset on the stack");
4938 
4939   getParser().Lex();
4940   if (getParser().parseAbsoluteExpression(Off))
4941     return true;
4942 
4943   if (getLexer().isNot(AsmToken::EndOfStatement))
4944     return TokError("unexpected token in directive");
4945 
4946   getParser().Lex();
4947   getStreamer().EmitWinCFISaveXMM(Reg, Off, Loc);
4948   return false;
4949 }
4950 
4951 bool X86AsmParser::parseDirectiveSEHPushFrame(SMLoc Loc) {
4952   bool Code = false;
4953   StringRef CodeID;
4954   if (getLexer().is(AsmToken::At)) {
4955     SMLoc startLoc = getLexer().getLoc();
4956     getParser().Lex();
4957     if (!getParser().parseIdentifier(CodeID)) {
4958       if (CodeID != "code")
4959         return Error(startLoc, "expected @code");
4960       Code = true;
4961     }
4962   }
4963 
4964   if (getLexer().isNot(AsmToken::EndOfStatement))
4965     return TokError("unexpected token in directive");
4966 
4967   getParser().Lex();
4968   getStreamer().EmitWinCFIPushFrame(Code, Loc);
4969   return false;
4970 }
4971 
4972 // Force static initialization.
4973 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86AsmParser() {
4974   RegisterMCAsmParser<X86AsmParser> X(getTheX86_32Target());
4975   RegisterMCAsmParser<X86AsmParser> Y(getTheX86_64Target());
4976 }
4977 
4978 #define GET_REGISTER_MATCHER
4979 #define GET_MATCHER_IMPLEMENTATION
4980 #define GET_SUBTARGET_FEATURE_NAME
4981 #include "X86GenAsmMatcher.inc"
4982