//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H

#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>

namespace llvm {

/// X86Operand - Instances of this class represent one parsed operand of an
/// X86 machine instruction.
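///
/// Operands are normally built by the assembly parser through the static
/// Create* factories below; a minimal sketch (illustrative only, the variable
/// names are not taken from X86AsmParser):
/// \code
///   Operands.push_back(X86Operand::CreateReg(X86::EAX, Start, End));
///   Operands.push_back(X86Operand::CreateImm(ImmExpr, Start, End));
/// \endcode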
struct X86Operand final : public MCParsedAsmOperand {
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  SMLoc StartLoc, EndLoc;
  SMLoc OffsetOfLoc;
  StringRef SymName;
  void *OpDecl;
  bool AddressOf;

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNo;
  };

  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
    bool LocalRef;
  };

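  /// Memory operands are kept in the segment:disp(base,index,scale) form of
  /// the X86 addressing modes. ModeSize records the 16/32/64-bit mode in
  /// effect when the operand was parsed, and Size the memory access size in
  /// bits (0 if no explicit size was given).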
  struct MemOp {
    unsigned SegReg;
    const MCExpr *Disp;
    unsigned BaseReg;
    unsigned DefaultBaseReg;
    unsigned IndexReg;
    unsigned Scale;
    unsigned Size;
    unsigned ModeSize;

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), OpDecl(nullptr),
        AddressOf(false) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  void print(raw_ostream &OS) const override {

    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (const char *SymNameStr = Sym.getName().data())
            OS << VName << SymNameStr;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  unsigned getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  unsigned getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  unsigned getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  unsigned getMemDefaultBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.DefaultBaseReg;
  }
  unsigned getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }

  bool isToken() const override { return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  bool isMemPlaceholder(const MCInstrDesc &Desc) const override {
    // Only MS InlineAsm uses global variables with registers rather than
    // rip/eip.
    return isMem() && !Mem.DefaultBaseReg && Mem.FrontendSize;
  }

  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }

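  // Memory operand that can be encoded with a SIB byte, i.e. one that does not
  // use the RIP/EIP-relative addressing form.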
  bool isSibMem() const {
    return isMem() && Mem.BaseReg != X86::RIP && Mem.BaseReg != X86::EIP;
  }

  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

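  // VSIB memory operands used by gathers/scatters: the index register must lie
  // in the given vector register class (XMM/YMM/ZMM, with or without the EVEX
  // extended registers).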
  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC128() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC256() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem256_RC128() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem256_RC256() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem64_RC128X() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC128X() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC256X() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC128X() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem256_RC256X() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC512() const {
    return isMem256() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }
  bool isMem512_RC256X() const {
    return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem512_RC512() const {
    return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }

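  // Absolute memory operand: a bare displacement with no segment override,
  // base or index register.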
  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
      !getMemIndexReg() && getMemScale() == 1;
  }
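  // AVX512 embedded rounding-control operand ({rn-sae} etc.), parsed as an
  // immediate.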
  bool isAVX512RC() const {
    return isImm();
  }

  bool isAbsMem16() const {
    return isAbsMem() && Mem.ModeSize == 16;
  }

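  // Source-index memory operand for string instructions: (R|E)SI/SI based,
  // no index register, scale 1 and a zero displacement.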
  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
       getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

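  // Destination-index memory operand for string instructions: (R|E)DI/DI
  // based, ES or no segment override, no index register, scale 1 and a zero
  // displacement.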
  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
      (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
       getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

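  // Memory-offset (moffs-style) operand: only a displacement and an optional
  // segment override. The suffixes of the predicates below give the address
  // size and the access size in bits.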
  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
      getMemScale() == 1;
  }

  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isGR16orGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR16RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isVectorReg() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::VR64RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR128XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR256XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR512RegClassID].contains(getReg()));
  }

  bool isVK1Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

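  // If a 64-bit GPR was written where a 32-bit one is expected, emit its
  // 32-bit sub-register instead.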
  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addGR16orGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(RegNo) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 16);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

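  // Rewrite a single mask register to the K0_K1/K2_K3/K4_K5/K6_K7 pair
  // register that contains it, for instructions taking a mask-pair operand.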
  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    switch (Reg) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

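  // Emit the canonical five-operand memory form: base register, scale, index
  // register, displacement and segment register.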
  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    if (getMemBaseReg())
      Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    else
      Inst.addOperand(MCOperand::createReg(getMemDefaultBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = RegNo;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc,
                                               StringRef SymName = StringRef(),
                                               void *OpDecl = nullptr,
                                               bool GlobalRef = true) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val      = Val;
    Res->Imm.LocalRef = !GlobalRef;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = true;
    return Res;
  }

  /// Create an absolute memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = 0;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = 0;
    Res->Mem.DefaultBaseReg = 0;
    Res->Mem.IndexReg = 0;
    Res->Mem.Scale    = 1;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }

  /// Create a generalized memory operand.
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
            unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
            SMLoc EndLoc, unsigned Size = 0,
            unsigned DefaultBaseReg = X86::NoRegister,
            StringRef SymName = StringRef(), void *OpDecl = nullptr,
            unsigned FrontendSize = 0) {
    // We should never just have a displacement, that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg || DefaultBaseReg) &&
           "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = SegReg;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = BaseReg;
    Res->Mem.DefaultBaseReg = DefaultBaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale    = Scale;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H