//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H

#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>

namespace llvm {

/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction.
struct X86Operand final : public MCParsedAsmOperand {
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  SMLoc StartLoc, EndLoc;
  SMLoc OffsetOfLoc;
  StringRef SymName;
  void *OpDecl;
  bool AddressOf;
  /// This is used for inline asm, where a MemOp may specify both a base and an
  /// index register, e.g. ARR[eax + ecx*4]; in that case no extra register may
  /// be used for the MemOp.
  bool UseUpRegs = false;

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNo;
  };

  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
    bool LocalRef;
  };

  struct MemOp {
    unsigned SegReg;
    const MCExpr *Disp;
    unsigned BaseReg;
    unsigned DefaultBaseReg;
    unsigned IndexReg;
    unsigned Scale;
    unsigned Size;
    unsigned ModeSize;

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;

    /// If false, then this operand must be a memory operand for an indirect
    /// branch instruction. Otherwise, this operand may belong to either a
    /// direct or indirect branch instruction.
    bool MaybeDirectBranchDest;
  };

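  // Only the union member selected by Kind is meaningful at any given time.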
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), OpDecl(nullptr),
        AddressOf(false) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  void print(raw_ostream &OS) const override {

    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (const char *SymNameStr = Sym.getName().data())
            OS << VName << SymNameStr;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  MCRegister getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  unsigned getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  unsigned getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  unsigned getMemDefaultBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.DefaultBaseReg;
  }
  unsigned getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }
  bool isMaybeDirectBranchDest() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.MaybeDirectBranchDest;
  }

  bool isToken() const override { return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

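  // Immediate predicates used by the matcher to select sign-extended immediate
  // encodings. A non-constant expression is optimistically accepted and left
  // for relaxation to handle.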
  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
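  // Sized memory predicates: an unsized memory operand (Mem.Size == 0) is
  // allowed to match any width.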
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }

  bool isSibMem() const {
    return isMem() && Mem.BaseReg != X86::RIP && Mem.BaseReg != X86::EIP;
  }

  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

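  // Memory operands with a vector index register (VSIB addressing, as used by
  // gather/scatter); the suffix names the required index register class.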
  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC128() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC256() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem256_RC128() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem256_RC256() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem64_RC128X() const {
    return isMem64() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem128_RC128X() const {
    return isMem128() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem128_RC256X() const {
    return isMem128() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem256_RC128X() const {
    return isMem256() && X86II::isXMMReg(Mem.IndexReg);
  }
  bool isMem256_RC256X() const {
    return isMem256() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem256_RC512() const {
    return isMem256() && X86II::isZMMReg(Mem.IndexReg);
  }
  bool isMem512_RC256X() const {
    return isMem512() && X86II::isYMMReg(Mem.IndexReg);
  }
  bool isMem512_RC512() const {
    return isMem512() && X86II::isZMMReg(Mem.IndexReg);
  }
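  // 512-bit memory operands whose base (and, where checked, index) register
  // must come from the named general-purpose register class.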
  bool isMem512_GR16() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR16RegClassID].contains(getMemBaseReg()))
      return false;
    return true;
  }
  bool isMem512_GR32() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR32RegClassID].contains(getMemBaseReg()) &&
        getMemBaseReg() != X86::EIP)
      return false;
    if (getMemIndexReg() &&
        !X86MCRegisterClasses[X86::GR32RegClassID].contains(getMemIndexReg()) &&
        getMemIndexReg() != X86::EIZ)
      return false;
    return true;
  }
  bool isMem512_GR64() const {
    if (!isMem512())
      return false;
    if (getMemBaseReg() &&
        !X86MCRegisterClasses[X86::GR64RegClassID].contains(getMemBaseReg()) &&
        getMemBaseReg() != X86::RIP)
      return false;
    if (getMemIndexReg() &&
        !X86MCRegisterClasses[X86::GR64RegClassID].contains(getMemIndexReg()) &&
        getMemIndexReg() != X86::RIZ)
      return false;
    return true;
  }

  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
           !getMemIndexReg() && getMemScale() == 1 && isMaybeDirectBranchDest();
  }

  bool isAVX512RC() const {
    return isImm();
  }

  bool isAbsMem16() const {
    return isAbsMem() && Mem.ModeSize == 16;
  }

  bool isMemUseUpRegs() const override { return UseUpRegs; }

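  // Implicit source operand of the string instructions: based on SI/ESI/RSI
  // with no index register, scale 1, and a zero constant displacement.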
  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
       getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

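  // Implicit destination operand of the string instructions: based on
  // DI/EDI/RDI, segment ES or unspecified, no index register, scale 1, and a
  // zero constant displacement.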
  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
      (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
       getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

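  // Absolute memory-offset ("moffs") forms: a displacement with no base or
  // index register, as used by the MOV variants that take a direct offset.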
  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
      getMemScale() == 1;
  }

  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isGR16orGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR16RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isVectorReg() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::VR64RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR128XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR256XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR512RegClassID].contains(getReg()));
  }

  bool isVK1Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

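  // If a wider GPR was parsed than the instruction encodes, substitute the
  // corresponding 32-bit (or, below, 16-bit) sub-register.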
  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addGR16orGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(RegNo) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 16);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

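  // Widen a single mask register to the mask-register pair that contains it
  // (e.g. K2 or K3 becomes K2_K3).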
  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    switch (Reg) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

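  // A full memory reference is emitted as five MCInst operands, in order:
  // base register, scale, index register, displacement, and segment register.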
  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    if (getMemBaseReg())
      Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    else
      Inst.addOperand(MCOperand::createReg(getMemDefaultBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

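  // The parser builds operands through the Create* factories below. A sketch
  // of typical use (illustrative only; Start and End come from the lexer):
  //   std::unique_ptr<X86Operand> Op =
  //       X86Operand::CreateReg(X86::EAX, Start, End);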
  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = RegNo;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc,
                                               StringRef SymName = StringRef(),
                                               void *OpDecl = nullptr,
                                               bool GlobalRef = true) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val      = Val;
    Res->Imm.LocalRef = !GlobalRef;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = true;
    return Res;
  }

  /// Create an absolute memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0,
            bool UseUpRegs = false, bool MaybeDirectBranchDest = true) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = 0;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = 0;
    Res->Mem.DefaultBaseReg = 0;
    Res->Mem.IndexReg = 0;
    Res->Mem.Scale    = 1;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }

  /// Create a generalized memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
            unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
            SMLoc EndLoc, unsigned Size = 0,
            unsigned DefaultBaseReg = X86::NoRegister,
            StringRef SymName = StringRef(), void *OpDecl = nullptr,
            unsigned FrontendSize = 0, bool UseUpRegs = false,
            bool MaybeDirectBranchDest = true) {
    // We should never just have a displacement, that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg || DefaultBaseReg) &&
           "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = SegReg;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = BaseReg;
    Res->Mem.DefaultBaseReg = DefaultBaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale    = Scale;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H