xref: /freebsd/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp (revision 1db9f3b21e39176dd5b67cf8ac378633b172463e)
//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

namespace {

enum PrefixKind { None, REX, REX2, XOP, VEX2, VEX3, EVEX };

static void emitByte(uint8_t C, SmallVectorImpl<char> &CB) { CB.push_back(C); }

class X86OpcodePrefixHelper {
  // REX (1 byte)
  // +-----+ +------+
  // | 40H | | WRXB |
  // +-----+ +------+
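  //
  // For example (illustrative): `addq %r8, %rax` sets W=1 (64-bit operation)
  // and R=1 (%r8 lives in modrm.reg), so the REX byte is
  // 0x40 | 1<<3 | 1<<2 = 0x4C and the full encoding is 4C 01 C0.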

  // REX2 (2 bytes)
  // +-----+ +-------------------+
  // | D5H | | M | R'X'B' | WRXB |
  // +-----+ +-------------------+

  // XOP (3-byte)
  // +-----+ +--------------+ +-------------------+
  // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
  // +-----+ +--------------+ +-------------------+

  // VEX2 (2 bytes)
  // +-----+ +-------------------+
  // | C5h | | R | vvvv | L | pp |
  // +-----+ +-------------------+

  // VEX3 (3 bytes)
  // +-----+ +--------------+ +-------------------+
  // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
  // +-----+ +--------------+ +-------------------+

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX.R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.R=1 (64-bit mode only)

  // VEX_X: equivalent to REX.X, only used when a
  // register is used as the index in the SIB byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)

  // VEX_B:
  //  1: Same as REX.B=0 (ignored in 32-bit mode)
  //  0: Same as REX.B=1 (64-bit mode only)

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)

  // VEX_5M (VEX m-mmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100: Reserved for future use
  //  0b00101: VEX MAP5
  //  0b00110: VEX MAP6
  //  0b00111: VEX MAP7
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword
  //  0b01011-0b11111: Reserved for future use

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2

  // EVEX (4 bytes)
  // +-----+ +---------------+ +--------------------+ +------------------------+
  // | 62h | | RXBR' | B'mmm | | W | vvvv | X' | pp | | z | L'L | b | v' | aaa |
  // +-----+ +---------------+ +--------------------+ +------------------------+

  // EVEX_L2/VEX_L (Vector Length):
  // L2 L
  //  0 0: scalar or 128-bit vector
  //  0 1: 256-bit vector
  //  1 0: 512-bit vector

  // 32-Register Support in 64-bit Mode Using EVEX with Embedded REX/REX2 Bits:
  //
  // +----------+---------+--------+-----------+---------+--------------+
  // |          |    4    |    3   |   [2:0]   | Type    | Common Usage |
  // +----------+---------+--------+-----------+---------+--------------+
  // | REG      | EVEX_R' | EVEX_R | modrm.reg | GPR, VR | Dest or Src  |
  // | VVVV     | EVEX_v' |       EVEX.vvvv    | GPR, VR | Dest or Src  |
  // | RM (VR)  | EVEX_X  | EVEX_B | modrm.r/m | VR      | Dest or Src  |
  // | RM (GPR) | EVEX_B' | EVEX_B | modrm.r/m | GPR     | Dest or Src  |
  // | BASE     | EVEX_B' | EVEX_B | modrm.r/m | GPR     | MA           |
  // | INDEX    | EVEX_X' | EVEX_X | sib.index | GPR     | MA           |
  // | VIDX     | EVEX_v' | EVEX_X | sib.index | VR      | VSIB MA      |
  // +----------+---------+--------+-----------+---------+--------------+
  //
  // * GPR  - General-purpose register
  // * VR   - Vector register
  // * VIDX - Vector index
  // * VSIB - Vector SIB
  // * MA   - Memory addressing

private:
  unsigned W : 1;
  unsigned R : 1;
  unsigned X : 1;
  unsigned B : 1;
  unsigned M : 1;
  unsigned R2 : 1;
  unsigned X2 : 1;
  unsigned B2 : 1;
  unsigned VEX_4V : 4;
  unsigned VEX_L : 1;
  unsigned VEX_PP : 2;
  unsigned VEX_5M : 5;
  unsigned EVEX_z : 1;
  unsigned EVEX_L2 : 1;
  unsigned EVEX_b : 1;
  unsigned EVEX_V2 : 1;
  unsigned EVEX_aaa : 3;
  PrefixKind Kind = None;
  const MCRegisterInfo &MRI;

  unsigned getRegEncoding(const MCInst &MI, unsigned OpNum) const {
    return MRI.getEncodingValue(MI.getOperand(OpNum).getReg());
  }

  void setR(unsigned Encoding) { R = Encoding >> 3 & 1; }
  void setR2(unsigned Encoding) {
    R2 = Encoding >> 4 & 1;
    assert((!R2 || (Kind <= REX2 || Kind == EVEX)) && "invalid setting");
  }
  void setX(unsigned Encoding) { X = Encoding >> 3 & 1; }
  void setX2(unsigned Encoding) {
    assert((Kind <= REX2 || Kind == EVEX) && "invalid setting");
    X2 = Encoding >> 4 & 1;
  }
  void setB(unsigned Encoding) { B = Encoding >> 3 & 1; }
  void setB2(unsigned Encoding) {
    assert((Kind <= REX2 || Kind == EVEX) && "invalid setting");
    B2 = Encoding >> 4 & 1;
  }
  void set4V(unsigned Encoding) { VEX_4V = Encoding & 0xf; }
  void setV2(unsigned Encoding) { EVEX_V2 = Encoding >> 4 & 1; }

public:
  void setW(bool V) { W = V; }
  void setR(const MCInst &MI, unsigned OpNum) {
    setR(getRegEncoding(MI, OpNum));
  }
  void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) {
    unsigned Reg = MI.getOperand(OpNum).getReg();
    // X is used to extend a vector register only when the shift is not 3.
    if (Shift != 3 && X86II::isApxExtendedReg(Reg))
      return;
    unsigned Encoding = MRI.getEncodingValue(Reg);
    X = Encoding >> Shift & 1;
  }
  void setB(const MCInst &MI, unsigned OpNum) {
    B = getRegEncoding(MI, OpNum) >> 3 & 1;
  }
  void set4V(const MCInst &MI, unsigned OpNum) {
    set4V(getRegEncoding(MI, OpNum));
  }
  void setL(bool V) { VEX_L = V; }
  void setPP(unsigned V) { VEX_PP = V; }
  void set5M(unsigned V) { VEX_5M = V; }
  void setR2(const MCInst &MI, unsigned OpNum) {
    setR2(getRegEncoding(MI, OpNum));
  }
  void setRR2(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = getRegEncoding(MI, OpNum);
    setR(Encoding);
    setR2(Encoding);
  }
  void setM(bool V) { M = V; }
  void setXX2(const MCInst &MI, unsigned OpNum) {
    unsigned Reg = MI.getOperand(OpNum).getReg();
    unsigned Encoding = MRI.getEncodingValue(Reg);
    setX(Encoding);
    // The index can be a vector register, while X2 is only used to extend a
    // GPR.
    if (Kind <= REX2 || X86II::isApxExtendedReg(Reg))
      setX2(Encoding);
  }
  void setBB2(const MCInst &MI, unsigned OpNum) {
    unsigned Reg = MI.getOperand(OpNum).getReg();
    unsigned Encoding = MRI.getEncodingValue(Reg);
    setB(Encoding);
    // The base can be a vector register, while B2 is only used to extend a
    // GPR.
    if (Kind <= REX2 || X86II::isApxExtendedReg(Reg))
      setB2(Encoding);
  }
  void setZ(bool V) { EVEX_z = V; }
  void setL2(bool V) { EVEX_L2 = V; }
  void setEVEX_b(bool V) { EVEX_b = V; }
  void setV2(const MCInst &MI, unsigned OpNum, bool HasVEX_4V) {
    // Only needed with VSIB, which doesn't use VVVV.
    if (HasVEX_4V)
      return;
    unsigned Reg = MI.getOperand(OpNum).getReg();
    if (X86II::isApxExtendedReg(Reg))
      return;
    setV2(MRI.getEncodingValue(Reg));
  }
  void set4VV2(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = getRegEncoding(MI, OpNum);
    set4V(Encoding);
    setV2(Encoding);
  }
  void setAAA(const MCInst &MI, unsigned OpNum) {
    EVEX_aaa = getRegEncoding(MI, OpNum);
  }
  void setNF(bool V) { EVEX_aaa |= V << 2; }

  X86OpcodePrefixHelper(const MCRegisterInfo &MRI)
      : W(0), R(0), X(0), B(0), M(0), R2(0), X2(0), B2(0), VEX_4V(0), VEX_L(0),
        VEX_PP(0), VEX_5M(0), EVEX_z(0), EVEX_L2(0), EVEX_b(0), EVEX_V2(0),
        EVEX_aaa(0), MRI(MRI) {}

  void setLowerBound(PrefixKind K) { Kind = K; }

  PrefixKind determineOptimalKind() {
    switch (Kind) {
    case None:
      // The M bit is intentionally not checked here because:
      // 1. There is no guarantee that REX2 is supported by an arch without
      //    explicit EGPR.
      // 2. REX2 is longer than the 0FH escape.
      Kind = (R2 | X2 | B2) ? REX2 : (W | R | X | B) ? REX : None;
      break;
    case REX:
      Kind = (R2 | X2 | B2) ? REX2 : REX;
      break;
    case REX2:
    case XOP:
    case VEX3:
    case EVEX:
      break;
    case VEX2:
      Kind = (W | X | B | (VEX_5M != 1)) ? VEX3 : VEX2;
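      // For example: with W = X = B = 0 and VEX_5M == 1 (the implied 0F
      // map), the two-byte form is kept, so `vaddps %ymm1, %ymm2, %ymm3`
      // encodes as C5 EC 58 D9 rather than the three-byte C4 E1 6C 58 D9.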
      break;
    }
    return Kind;
  }

  void emit(SmallVectorImpl<char> &CB) const {
    uint8_t FirstPayload =
        ((~R) & 0x1) << 7 | ((~X) & 0x1) << 6 | ((~B) & 0x1) << 5;
    uint8_t LastPayload = ((~VEX_4V) & 0xf) << 3 | VEX_L << 2 | VEX_PP;
    switch (Kind) {
    case None:
      return;
    case REX:
      emitByte(0x40 | W << 3 | R << 2 | X << 1 | B, CB);
      return;
    case REX2:
      emitByte(0xD5, CB);
      emitByte(M << 7 | R2 << 6 | X2 << 5 | B2 << 4 | W << 3 | R << 2 | X << 1 |
                   B,
               CB);
      return;
    case VEX2:
      emitByte(0xC5, CB);
      emitByte(((~R) & 1) << 7 | LastPayload, CB);
      return;
    case VEX3:
    case XOP:
      emitByte(Kind == VEX3 ? 0xC4 : 0x8F, CB);
      emitByte(FirstPayload | VEX_5M, CB);
      emitByte(W << 7 | LastPayload, CB);
      return;
    case EVEX:
      assert(VEX_5M && !(VEX_5M & 0x8) && "invalid mmm fields for EVEX!");
      emitByte(0x62, CB);
      emitByte(FirstPayload | ((~R2) & 0x1) << 4 | B2 << 3 | VEX_5M, CB);
      emitByte(W << 7 | ((~VEX_4V) & 0xf) << 3 | ((~X2) & 0x1) << 2 | VEX_PP,
               CB);
      emitByte(EVEX_z << 7 | EVEX_L2 << 6 | VEX_L << 5 | EVEX_b << 4 |
                   ((~EVEX_V2) & 0x1) << 3 | EVEX_aaa,
               CB);
      return;
    }
  }
};

class X86MCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII;
  MCContext &Ctx;

public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : MCII(mcii), Ctx(ctx) {}
  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
  X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
  ~X86MCCodeEmitter() override = default;

  void emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB,
                  const MCSubtargetInfo &STI) const override;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

private:
  unsigned getX86RegNum(const MCOperand &MO) const;

  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const;

  void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize,
                     MCFixupKind FixupKind, uint64_t StartByte,
                     SmallVectorImpl<char> &CB,
                     SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const;

  void emitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        SmallVectorImpl<char> &CB) const;

  void emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   SmallVectorImpl<char> &CB) const;

  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
                        uint64_t TSFlags, PrefixKind Kind, uint64_t StartByte,
                        SmallVectorImpl<char> &CB,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI,
                        bool ForceSIB = false) const;

  PrefixKind emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
                            const MCSubtargetInfo &STI,
                            SmallVectorImpl<char> &CB) const;

  PrefixKind emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
                                 const MCSubtargetInfo &STI,
                                 SmallVectorImpl<char> &CB) const;

  void emitSegmentOverridePrefix(unsigned SegOperand, const MCInst &MI,
                                 SmallVectorImpl<char> &CB) const;

  PrefixKind emitOpcodePrefix(int MemOperand, const MCInst &MI,
                              const MCSubtargetInfo &STI,
                              SmallVectorImpl<char> &CB) const;

  PrefixKind emitREXPrefix(int MemOperand, const MCInst &MI,
                           const MCSubtargetInfo &STI,
                           SmallVectorImpl<char> &CB) const;
};

} // end anonymous namespace

static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
  assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
  return RM | (RegOpcode << 3) | (Mod << 6);
}
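
// For example, `xorl %esi, %ecx` (31 /r) takes modRMByte(3, 6, 1) == 0xF1,
// i.e. register-direct mode with %esi in reg and %ecx in r/m: 31 F1.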

static void emitConstant(uint64_t Val, unsigned Size,
                         SmallVectorImpl<char> &CB) {
  // Output the constant in little endian byte order.
  for (unsigned i = 0; i != Size; ++i) {
    emitByte(Val & 255, CB);
    Val >>= 8;
  }
}
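
// For example, emitConstant(0x12345678, 4, CB) appends the bytes
// 78 56 34 12 to CB.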

/// Determine if this immediate can fit in a disp8 or a compressed disp8 for
/// EVEX instructions. \p ImmOffset will be set to the value to pass to the
/// ImmOffset parameter of emitImmediate.
static bool isDispOrCDisp8(uint64_t TSFlags, int Value, int &ImmOffset) {
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  unsigned CD8_Scale =
      (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
  CD8_Scale = CD8_Scale ? 1U << (CD8_Scale - 1) : 0U;
  if (!HasEVEX || !CD8_Scale)
    return isInt<8>(Value);

  assert(isPowerOf2_32(CD8_Scale) && "Unexpected CD8 scale!");
  if (Value & (CD8_Scale - 1)) // Unaligned offset
    return false;

  int CDisp8 = Value / static_cast<int>(CD8_Scale);
  if (!isInt<8>(CDisp8))
    return false;

  // ImmOffset will be added to Value in emitImmediate leaving just CDisp8.
  ImmOffset = CDisp8 - Value;
  return true;
}
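
// Worked example: a full-width 512-bit EVEX access with CD8_Scale == 64 and
// Value == 128 yields CDisp8 == 2 and ImmOffset == -126, so emitImmediate
// ends up emitting the single displacement byte 0x02.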

/// \returns the appropriate fixup kind to use for an immediate in an
/// instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default:
      llvm_unreachable("Unsupported signed fixup size!");
    case 4:
      return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}

enum GlobalOffsetTableExprKind { GOT_None, GOT_Normal, GOT_SymDiff };

/// Check if this expression starts with _GLOBAL_OFFSET_TABLE_ and if it is
/// of the form _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on
/// ELF i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only the simple
/// cases that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the
/// start of a binary expression.
static GlobalOffsetTableExprKind
startsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = nullptr;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}
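
// For example, the i386 PIC prologue `addl $_GLOBAL_OFFSET_TABLE_, %ebx`
// yields GOT_Normal, while an expression of the form
// _GLOBAL_OFFSET_TABLE_-symbol yields GOT_SymDiff.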

static bool hasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}

static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4 &&
       Opcode != X86::JCC_4) ||
      getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
    return false;

  unsigned CurOp = X86II::getOperandBias(Desc);
  const MCOperand &Op = MI.getOperand(CurOp);
  if (!Op.isExpr())
    return false;

  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
  return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
}

unsigned X86MCCodeEmitter::getX86RegNum(const MCOperand &MO) const {
  return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
}

unsigned X86MCCodeEmitter::getX86RegEncoding(const MCInst &MI,
                                             unsigned OpNum) const {
  return Ctx.getRegisterInfo()->getEncodingValue(MI.getOperand(OpNum).getReg());
}

void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc,
                                     unsigned Size, MCFixupKind FixupKind,
                                     uint64_t StartByte,
                                     SmallVectorImpl<char> &CB,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      emitConstant(DispOp.getImm() + ImmOffset, Size, CB);
      return;
    }
    Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // Handle expressions that need special fixup kinds: references to
  // _GLOBAL_OFFSET_TABLE_ and SECREL symbol references.
  if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = startsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }

      if (Kind == GOT_Normal)
        ImmOffset = static_cast<int>(CB.size() - StartByte);
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (hasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
      if (hasSecRelSymbolRef(Bin->getLHS()) ||
          hasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
      FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
    ImmOffset -= 4;
    // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
    // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
    // this needs to be a GOTPC32 relocation.
    if (startsWithGlobalOffsetTable(Expr) != GOT_None)
      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
  }
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;
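
  // For example, `call foo` (E8 rel32) places the fixup at the start of the
  // 4-byte field, but the CPU treats rel32 as relative to the end of the
  // instruction; the biases above account for that difference.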

  if (ImmOffset)
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and Size bytes of zeros.
  Fixups.push_back(MCFixup::create(static_cast<uint32_t>(CB.size() - StartByte),
                                   Expr, FixupKind, Loc));
  emitConstant(0, Size, CB);
}

void X86MCCodeEmitter::emitRegModRMByte(const MCOperand &ModRMReg,
                                        unsigned RegOpcodeFld,
                                        SmallVectorImpl<char> &CB) const {
  emitByte(modRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)), CB);
}

void X86MCCodeEmitter::emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                                   SmallVectorImpl<char> &CB) const {
  // SIB byte is in the same format as the modRMByte.
  emitByte(modRMByte(SS, Index, Base), CB);
}

void X86MCCodeEmitter::emitMemModRMByte(
    const MCInst &MI, unsigned Op, unsigned RegOpcodeField, uint64_t TSFlags,
    PrefixKind Kind, uint64_t StartByte, SmallVectorImpl<char> &CB,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI,
    bool ForceSIB) const {
  const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP ||
      BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
    assert(STI.hasFeature(X86::Is64Bit) &&
           "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && !ForceSIB &&
           "Invalid rip-relative address");
    emitByte(modRMByte(0, RegOpcodeField, 5), CB);

    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = [&]() {
      // Enable relaxed relocation only for a MCSymbolRefExpr. We cannot use a
      // relaxed relocation if an offset is present (e.g. x@GOTPCREL+4).
      if (!(Disp.isExpr() && isa<MCSymbolRefExpr>(Disp.getExpr())))
        return X86::reloc_riprel_4byte;

      // Certain loads for GOT references can be relocated against the symbol
      // directly if the symbol ends up in the same linkage unit.
      switch (Opcode) {
      default:
        return X86::reloc_riprel_4byte;
      case X86::MOV64rm:
        // movq loads are a subset of reloc_riprel_4byte_relax_rex. They are a
        // special case because COFF and Mach-O don't support ELF's more
        // flexible R_X86_64_REX_GOTPCRELX relaxation.
        // TODO: Support the new relocation for REX2.
        assert(Kind == REX || Kind == REX2);
        return X86::reloc_riprel_4byte_movq_load;
      case X86::ADC32rm:
      case X86::ADD32rm:
      case X86::AND32rm:
      case X86::CMP32rm:
      case X86::MOV32rm:
      case X86::OR32rm:
      case X86::SBB32rm:
      case X86::SUB32rm:
      case X86::TEST32mr:
      case X86::XOR32rm:
      case X86::CALL64m:
      case X86::JMP64m:
      case X86::TAILJMPm64:
      case X86::TEST64mr:
      case X86::ADC64rm:
      case X86::ADD64rm:
      case X86::AND64rm:
      case X86::CMP64rm:
      case X86::OR64rm:
      case X86::SBB64rm:
      case X86::SUB64rm:
      case X86::XOR64rm:
        // We don't have a relocation for the REX2 prefix yet, so temporarily
        // use the REX relocation.
        // TODO: Support the new relocation for REX2.
        return (Kind == REX || Kind == REX2) ? X86::reloc_riprel_4byte_relax_rex
                                             : X86::reloc_riprel_4byte_relax;
      }
    }();

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the displacement field of the instruction with
    // the size of the immediate field. If we have this case, add it into the
    // expression to emit.
    // Note: rip-relative addressing using immediate displacement values should
    // not be adjusted, assuming it was the user's intent.
    int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
                      ? X86II::getSizeOfImm(TSFlags)
                      : 0;

    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB,
                  Fixups, -ImmSize);
    return;
  }

  unsigned BaseRegNo = BaseReg ? getX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (X86_MC::is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And getX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo to the row values from
      // Table 2-1 for 16-bit addressing modes, where zero means disallowed.
      static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5};
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[getX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isInt<8>(Disp.getImm())) {
        if (Disp.getImm() == 0 && RMfield != 6) {
          // There is no displacement; just the register.
          emitByte(modRMByte(0, RegOpcodeField, RMfield), CB);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
        emitByte(modRMByte(1, RegOpcodeField, RMfield), CB);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      emitByte(modRMByte(2, RegOpcodeField, RMfield), CB);
    } else {
      assert(IndexReg.getReg() == 0 && "Unexpected index register!");
      // There is no BaseReg; this is the plain [disp16] case.
      emitByte(modRMByte(0, RegOpcodeField, 6), CB);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    emitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, StartByte, CB, Fixups);
    return;
  }
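
  // For example, `movw %ax, (%bx,%si)` maps BX+SI to RMfield 0 and has no
  // displacement, so the ModR/M byte is modRMByte(0, 0, 0) == 0x00.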

  // Check for presence of {disp8} or {disp32} pseudo prefixes.
  bool UseDisp8 = MI.getFlags() & X86::IP_USE_DISP8;
  bool UseDisp32 = MI.getFlags() & X86::IP_USE_DISP32;

  // We only allow no displacement if no pseudo prefix is present.
  bool AllowNoDisp = !UseDisp8 && !UseDisp32;
  // Disp8 is allowed unless the {disp32} prefix is present.
  bool AllowDisp8 = !UseDisp32;

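  // For example, `{disp32} movl (%eax), %ebx` sets IP_USE_DISP32, so even a
  // zero displacement is emitted below in the mod=2 form with a 4-byte
  // displacement.
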
  // Determine whether a SIB byte is needed.
  if ( // The SIB byte must be used if there is an index register or the
       // encoding requires a SIB byte.
      !ForceSIB && IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12/R20/R28, all of
      // which encode to an R/M value of 4, which indicates that a SIB byte is
      // present.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
      (!STI.hasFeature(X86::Is64Bit) || BaseReg != 0)) {

    if (BaseReg == 0) { // [disp32]     in X86-32 mode
      emitByte(modRMByte(0, RegOpcodeField, 5), CB);
      emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, CB, Fixups);
      return;
    }

    // If the base is not EBP/ESP/R12/R13/R20/R21/R28/R29 and there is no
    // displacement, use the simple indirect register encoding; this handles
    // addresses like [EAX]. The encoding for [EBP], [R13], [R20], [R21], [R28]
    // or [R29] with no displacement means [disp32], so we handle it by emitting
    // a displacement of 0 later.
    if (BaseRegNo != N86::EBP) {
      if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp) {
        emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB);
        return;
      }

      // If the displacement is @tlscall, treat it as a zero.
      if (Disp.isExpr()) {
        auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
        if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
          // This is exclusively used by call *a@tlscall(base). The relocation
          // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
          Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
          emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB);
          return;
        }
      }
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8],
    // including a compressed disp8 for EVEX instructions that support it.
    // This also handles the 0 displacement for [EBP], [R13], [R21] or [R29]. We
    // can't use disp8 if the {disp32} pseudo prefix is present.
    if (Disp.isImm() && AllowDisp8) {
      int ImmOffset = 0;
      if (isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
        emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), CB);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups,
                      ImmOffset);
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32].
    // The displacement may be 0 for the [EBP], [R13], [R21], [R29] cases if
    // the {disp32} pseudo prefix prevented using disp8 above.
    emitByte(modRMByte(2, RegOpcodeField, BaseRegNo), CB);
    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
                                                : X86::reloc_signed_4byte;
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB,
                  Fixups);
    return;
  }

  // We need a SIB byte, so start by outputting the ModR/M byte first.
  assert(IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP &&
         "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8 = false;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    BaseRegNo = 5;
    emitByte(modRMByte(0, RegOpcodeField, 4), CB);
    ForceDisp32 = true;
  } else if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp &&
             // The base reg can't be EBP/RBP/R13/R21/R29, as that would end up
             // with '5' as the base field, but that is the magic [*]
             // nomenclature that indicates no base when mod=0. For these cases
             // we'll emit a 0 displacement instead.
             BaseRegNo != N86::EBP) {
    // Emit the no-displacement ModR/M byte.
    emitByte(modRMByte(0, RegOpcodeField, 4), CB);
  } else if (Disp.isImm() && AllowDisp8 &&
             isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
    // The displacement fits in a byte or matches an EVEX compressed disp8, so
    // use the disp8 encoding. This also handles an EBP/R13/R21/R29 base with
    // a 0 displacement, unless the {disp32} pseudo prefix was used.
    emitByte(modRMByte(1, RegOpcodeField, 4), CB);
    ForceDisp8 = true;
  } else {
    // Otherwise, emit the normal disp32 encoding.
    emitByte(modRMByte(2, RegOpcodeField, 4), CB);
    ForceDisp32 = true;
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3};
  unsigned SS = SSTable[Scale.getImm()];

  unsigned IndexRegNo = IndexReg.getReg() ? getX86RegNum(IndexReg) : 4;

  emitSIBByte(SS, IndexRegNo, BaseRegNo, CB);
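
  // For example, `movl (%eax,%ecx,8), %edx` takes SS = 3, IndexRegNo = 1 and
  // BaseRegNo = 0, so the SIB byte is 0xC8 and the full encoding is 8B 14 C8.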

  // Do we need to output a displacement?
  if (ForceDisp8)
    emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups,
                  ImmOffset);
  else if (ForceDisp32)
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  StartByte, CB, Fixups);
}

/// Emit all instruction prefixes.
///
/// \returns one of REX, REX2, XOP, VEX2, VEX3, or EVEX if any of them is used;
/// otherwise returns None.
PrefixKind X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
                                            const MCSubtargetInfo &STI,
                                            SmallVectorImpl<char> &CB) const {
  uint64_t TSFlags = MCII.get(MI.getOpcode()).TSFlags;
  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  // Emit segment override opcode prefix as needed.
  if (MemoryOperand != -1) {
    MemoryOperand += CurOp;
    emitSegmentOverridePrefix(MemoryOperand + X86::AddrSegmentReg, MI, CB);
  }

  // Emit the repeat opcode prefix as needed.
  unsigned Flags = MI.getFlags();
  if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
    emitByte(0xF3, CB);
  if (Flags & X86::IP_HAS_REPEAT_NE)
    emitByte(0xF2, CB);

  // Emit the address size opcode prefix as needed.
  if (X86_MC::needsAddressSizeOverride(MI, STI, MemoryOperand, TSFlags) ||
      Flags & X86::IP_HAS_AD_SIZE)
    emitByte(0x67, CB);

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(2).getReg() != X86::DS)
      emitSegmentOverridePrefix(2, MI, CB);
    CurOp += 3; // Consume operands.
    break;
  }
  case X86II::RawFrmSrc: {
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(1).getReg() != X86::DS)
      emitSegmentOverridePrefix(1, MI, CB);
    CurOp += 2; // Consume operands.
    break;
  }
  case X86II::RawFrmDst: {
    ++CurOp; // Consume operand.
    break;
  }
  case X86II::RawFrmMemOffs: {
    // Emit segment override opcode prefix as needed.
    emitSegmentOverridePrefix(1, MI, CB);
    break;
  }
  }

  // The REX prefix is optional, but if used it must be immediately before the
  // opcode. Dispatch on the encoding type for this instruction.
  return (TSFlags & X86II::EncodingMask)
             ? emitVEXOpcodePrefix(MemoryOperand, MI, STI, CB)
             : emitOpcodePrefix(MemoryOperand, MI, STI, CB);
}

// AVX instructions are encoded using an encoding scheme that combines
// prefix bytes, opcode extension field, operand encoding fields, and vector
// length encoding capability into a new prefix, referred to as VEX.

// The majority of the AVX-512 family of instructions (operating on
// 512/256/128-bit vector register operands) are encoded using a new prefix
// (called EVEX).

// XOP is a revised subset of what was originally intended as SSE5. It was
// changed to be similar but not overlapping with AVX.

/// Emit XOP, VEX2, VEX3 or EVEX prefix.
/// \returns the used prefix.
PrefixKind
X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      SmallVectorImpl<char> &CB) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");

#ifndef NDEBUG
  unsigned NumOps = MI.getNumOperands();
  for (unsigned I = NumOps ? X86II::getOperandBias(Desc) : 0; I != NumOps;
       ++I) {
    const MCOperand &MO = MI.getOperand(I);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
      report_fatal_error(
          "Cannot encode high byte register in VEX/EVEX-prefixed instruction");
  }
#endif

  X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
  switch (TSFlags & X86II::EncodingMask) {
  default:
    break;
  case X86II::XOP:
    Prefix.setLowerBound(XOP);
    break;
  case X86II::VEX:
    // VEX can be 2-byte or 3-byte; which one is not determined yet if it is
    // not requested explicitly.
    Prefix.setLowerBound(MI.getFlags() & X86::IP_USE_VEX3 ? VEX3 : VEX2);
    break;
  case X86II::EVEX:
    Prefix.setLowerBound(EVEX);
    break;
  }

  Prefix.setW(TSFlags & X86II::REX_W);
  Prefix.setNF(TSFlags & X86II::EVEX_NF);

  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool IsND = X86II::hasNewDataDest(TSFlags); // IsND implies HasVEX_4V
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  switch (TSFlags & X86II::OpMapMask) {
  default:
    llvm_unreachable("Invalid prefix!");
  case X86II::TB:
    Prefix.set5M(0x1); // 0F
    break;
  case X86II::T8:
    Prefix.set5M(0x2); // 0F 38
    break;
  case X86II::TA:
    Prefix.set5M(0x3); // 0F 3A
    break;
  case X86II::XOP8:
    Prefix.set5M(0x8);
    break;
  case X86II::XOP9:
    Prefix.set5M(0x9);
    break;
  case X86II::XOPA:
    Prefix.set5M(0xA);
    break;
  case X86II::T_MAP4:
    Prefix.set5M(0x4);
    break;
  case X86II::T_MAP5:
    Prefix.set5M(0x5);
    break;
  case X86II::T_MAP6:
    Prefix.set5M(0x6);
    break;
  case X86II::T_MAP7:
    Prefix.set5M(0x7);
    break;
  }

  Prefix.setL(TSFlags & X86II::VEX_L);
  Prefix.setL2(TSFlags & X86II::EVEX_L2);
  if ((TSFlags & X86II::EVEX_L2) && STI.hasFeature(X86::FeatureAVX512) &&
      !STI.hasFeature(X86::FeatureEVEX512))
    report_fatal_error("ZMM registers are not supported without EVEX512");
  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD:
    Prefix.setPP(0x1); // 66
    break;
  case X86II::XS:
    Prefix.setPP(0x2); // F3
    break;
  case X86II::XD:
    Prefix.setPP(0x3); // F2
    break;
  }

  Prefix.setZ(HasEVEX_K && (TSFlags & X86II::EVEX_Z));
  Prefix.setEVEX_b(TSFlags & X86II::EVEX_B);

  bool EncodeRC = false;
  uint8_t EVEX_rc = 0;

  unsigned CurOp = X86II::getOperandBias(Desc);

  switch (TSFlags & X86II::FormMask) {
  default:
    llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!");
  case X86II::MRMDestMem4VOp3CC: {
    //  src1(ModR/M), MemAddr, src2(VEX_4V)
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    Prefix.set4VV2(MI, CurOp++);
    break;
  }
  case X86II::MRM_C0:
  case X86II::RawFrm:
    break;
  case X86II::MRMDestMemFSIB:
  case X86II::MRMDestMem: {
    // MRMDestMem instruction forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    // NDD:
    //  dst(VEX_4V), MemAddr, src1(ModR/M)
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V);

    if (IsND)
      Prefix.set4VV2(MI, CurOp++);

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (!IsND && HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setRR2(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcMemFSIB:
  case X86II::MRMSrcMem: {
    // MRMSrcMem instruction forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(Imm[7:4])
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //
    //  NDD:
    //  dst(VEX_4V), src1(ModR/M), MemAddr
    if (IsND)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setRR2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (!IsND && HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V);

    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    // Instruction format for 4VOp3:
    //   src1(ModR/M), MemAddr, src3(VEX_4V)
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    Prefix.set4VV2(MI, CurOp + X86::AddrNumOperands);
    break;
  }
  case X86II::MRMSrcMemOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    Prefix.setR(MI, CurOp++);
    Prefix.set4V(MI, CurOp++);
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    break;
  }
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m: {
    // MRM[0-7]m instruction forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    Prefix.setV2(MI, MemOperand + X86::AddrIndexReg, HasVEX_4V);

    break;
  }
  case X86II::MRMSrcReg: {
    // MRMSrcReg instruction forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    //
    //  NDD:
    //  dst(VEX_4V), src1(ModR/M.reg), src2(ModR/M)
    if (IsND)
      Prefix.set4VV2(MI, CurOp++);
    Prefix.setRR2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (!IsND && HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setBB2(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;

    if (TSFlags & X86II::EVEX_B) {
      if (HasEVEX_RC) {
        unsigned NumOps = Desc.getNumOperands();
        unsigned RcOperand = NumOps - 1;
        assert(RcOperand >= CurOp);
        EVEX_rc = MI.getOperand(RcOperand).getImm();
        assert(EVEX_rc <= 3 && "Invalid rounding control!");
      }
      EncodeRC = true;
    }
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    // Instruction format for 4VOp3:
    //   src1(ModR/M), src2(ModR/M), src3(VEX_4V)
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, CurOp++);
    Prefix.set4VV2(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcRegOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    Prefix.setR(MI, CurOp++);
    Prefix.set4V(MI, CurOp++);
    // Skip the second register source (encoded in Imm[7:4]).
    ++CurOp;

    Prefix.setB(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;
    break;
  }
  case X86II::MRMDestReg: {
    // MRMDestReg instruction forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    //
    //  NDD:
    //  dst(VEX_4V), src1(ModR/M), src2(ModR/M)
    if (IsND)
      Prefix.set4VV2(MI, CurOp++);
    Prefix.setBB2(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (!IsND && HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setRR2(MI, CurOp++);
    if (TSFlags & X86II::EVEX_B)
      EncodeRC = true;
    break;
  }
  case X86II::MRMr0: {
    // MRMr0 instruction forms:
    //  11:rrr:000
    //  dst(ModR/M)
    Prefix.setRR2(MI, CurOp++);
    break;
  }
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r: {
    // MRM0r-MRM7r instruction forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    Prefix.setBB2(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;
    break;
  }
  }
  if (EncodeRC) {
    Prefix.setL(EVEX_rc & 0x1);
    Prefix.setL2(EVEX_rc & 0x2);
  }
  PrefixKind Kind = Prefix.determineOptimalKind();
  Prefix.emit(CB);
  return Kind;
}

/// Emit REX prefix which specifies
///   1) 64-bit instructions,
///   2) non-default operand size, and
///   3) use of X86-64 extended registers.
///
/// \returns the used prefix (REX or None).
PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI,
                                           const MCSubtargetInfo &STI,
                                           SmallVectorImpl<char> &CB) const {
  if (!STI.hasFeature(X86::Is64Bit))
    return None;
  X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;
  Prefix.setW(TSFlags & X86II::REX_W);
  unsigned NumOps = MI.getNumOperands();
  bool UsesHighByteReg = false;
#ifndef NDEBUG
  bool HasRegOp = false;
#endif
  unsigned CurOp = NumOps ? X86II::getOperandBias(Desc) : 0;
  for (unsigned i = CurOp; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (MO.isReg()) {
#ifndef NDEBUG
      HasRegOp = true;
#endif
      unsigned Reg = MO.getReg();
      if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
        UsesHighByteReg = true;
      // If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix.
      if (X86II::isX86_64NonExtLowByteReg(Reg))
        Prefix.setLowerBound(REX);
    } else if (MO.isExpr() && STI.getTargetTriple().isX32()) {
      // GOTTPOFF and TLSDESC relocations require a REX prefix to allow
      // linker optimizations: even if the instructions we see may not require
      // any prefix, they may be replaced by instructions that do. This is
      // handled as a special case here so that it also works for hand-written
      // assembly without the user needing to write REX, as with GNU as.
      const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
      if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF ||
                  Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) {
        Prefix.setLowerBound(REX);
      }
    }
  }
  if ((TSFlags & X86II::ExplicitOpPrefixMask) == X86II::ExplicitREX2Prefix)
    Prefix.setLowerBound(REX2);
  switch (TSFlags & X86II::FormMask) {
  default:
    assert(!HasRegOp && "Unexpected form in emitREXPrefix!");
    break;
  case X86II::RawFrm:
  case X86II::RawFrmMemOffs:
  case X86II::RawFrmSrc:
  case X86II::RawFrmDst:
  case X86II::RawFrmDstSrc:
    break;
  case X86II::AddRegFrm:
    Prefix.setBB2(MI, CurOp++);
    break;
  case X86II::MRMSrcReg:
  case X86II::MRMSrcRegCC:
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, CurOp++);
    break;
  case X86II::MRMSrcMem:
  case X86II::MRMSrcMemCC:
    Prefix.setRR2(MI, CurOp++);
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    break;
  case X86II::MRMDestReg:
    Prefix.setBB2(MI, CurOp++);
    Prefix.setRR2(MI, CurOp++);
    break;
  case X86II::MRMDestMem:
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    Prefix.setRR2(MI, CurOp++);
    break;
  case X86II::MRMXmCC:
  case X86II::MRMXm:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m:
    Prefix.setBB2(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setXX2(MI, MemOperand + X86::AddrIndexReg);
    break;
  case X86II::MRMXrCC:
  case X86II::MRMXr:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r:
    Prefix.setBB2(MI, CurOp++);
    break;
  }
  Prefix.setM((TSFlags & X86II::OpMapMask) == X86II::TB);
  PrefixKind Kind = Prefix.determineOptimalKind();
  if (Kind && UsesHighByteReg)
    report_fatal_error(
        "Cannot encode high byte register in REX-prefixed instruction");
  Prefix.emit(CB);
  return Kind;
}

/// Emit segment override opcode prefix as needed.
void X86MCCodeEmitter::emitSegmentOverridePrefix(
    unsigned SegOperand, const MCInst &MI, SmallVectorImpl<char> &CB) const {
  // Check for explicit segment override on memory operand.
  if (unsigned Reg = MI.getOperand(SegOperand).getReg())
    emitByte(X86::getSegmentOverridePrefixForReg(Reg), CB);
}

/// Emit all instruction prefixes prior to the opcode.
///
/// \param MemOperand the operand # of the start of a memory operand if present.
/// If not present, it is -1.
///
/// \returns the used prefix (REX or None).
PrefixKind X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI,
                                              const MCSubtargetInfo &STI,
                                              SmallVectorImpl<char> &CB) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  // Emit the operand size opcode prefix as needed.
  if ((TSFlags & X86II::OpSizeMask) ==
      (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16))
    emitByte(0x66, CB);
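
  // For example, in 32-bit or 64-bit mode the 16-bit `movw %ax, %bx` is an
  // OpSize16 instruction and receives the 0x66 prefix, while in 16-bit mode
  // it is the 32-bit register forms (OpSize32) that need it.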
1432 
1433   // Emit the LOCK opcode prefix.
1434   if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
1435     emitByte(0xF0, CB);
1436 
1437   // Emit the NOTRACK opcode prefix.
1438   if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
1439     emitByte(0x3E, CB);
1440 
1441   switch (TSFlags & X86II::OpPrefixMask) {
1442   case X86II::PD: // 66
1443     emitByte(0x66, CB);
1444     break;
1445   case X86II::XS: // F3
1446     emitByte(0xF3, CB);
1447     break;
1448   case X86II::XD: // F2
1449     emitByte(0xF2, CB);
1450     break;
1451   }
1452 
1453   // Handle REX prefix.
1454   assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) &&
1455          "REX.W requires 64bit mode.");
1456   PrefixKind Kind = emitREXPrefix(MemOperand, MI, STI, CB);
1457 
1458   // 0x0F escape code must be emitted just before the opcode.
1459   switch (TSFlags & X86II::OpMapMask) {
1460   case X86II::TB:        // Two-byte opcode map
1461     // Already encoded by the M bit in REX2; no 0x0F escape byte needed.
1462     if (Kind == REX2)
1463       break;
1464     [[fallthrough]];
1465   case X86II::T8:        // 0F 38
1466   case X86II::TA:        // 0F 3A
1467   case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
1468     emitByte(0x0F, CB);
1469     break;
1470   }
1471 
1472   switch (TSFlags & X86II::OpMapMask) {
1473   case X86II::T8: // 0F 38
1474     emitByte(0x38, CB);
1475     break;
1476   case X86II::TA: // 0F 3A
1477     emitByte(0x3A, CB);
1478     break;
1479   }
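  // Illustrative example (assumed): "pshufb %xmm1, %xmm0" lives in the
  // 0F 38 map and carries the PD prefix, so it encodes as 66 0F 38 00 C1.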
1480 
1481   return Kind;
1482 }
1483 
1484 void X86MCCodeEmitter::emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB,
1485                                   const MCSubtargetInfo &STI) const {
1486   unsigned Opcode = MI.getOpcode();
1487   const MCInstrDesc &Desc = MCII.get(Opcode);
1488   uint64_t TSFlags = Desc.TSFlags;
1489 
1490   // Pseudo instructions don't get encoded.
1491   if (X86II::isPseudo(TSFlags))
1492     return;
1493 
1494   unsigned CurOp = X86II::getOperandBias(Desc);
1495 
1496   emitPrefixImpl(CurOp, MI, STI, CB);
1497 }
1498 
1499 void X86MCCodeEmitter::encodeInstruction(const MCInst &MI,
1500                                          SmallVectorImpl<char> &CB,
1501                                          SmallVectorImpl<MCFixup> &Fixups,
1502                                          const MCSubtargetInfo &STI) const {
1503   unsigned Opcode = MI.getOpcode();
1504   const MCInstrDesc &Desc = MCII.get(Opcode);
1505   uint64_t TSFlags = Desc.TSFlags;
1506 
1507   // Pseudo instructions don't get encoded.
1508   if (X86II::isPseudo(TSFlags))
1509     return;
1510 
1511   unsigned NumOps = Desc.getNumOperands();
1512   unsigned CurOp = X86II::getOperandBias(Desc);
1513 
1514   uint64_t StartByte = CB.size();
1515 
1516   PrefixKind Kind = emitPrefixImpl(CurOp, MI, STI, CB);
1517 
1518   // Does this instruction use the VEX.VVVV field?
1519   bool HasVEX_4V = TSFlags & X86II::VEX_4V;
1520   bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;
1521 
1522   // Does this instruction use the EVEX.aaa (writemask) field?
1523   bool HasEVEX_K = TSFlags & X86II::EVEX_K;
1524   bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
1525 
1526   // Used if a register is encoded in bits [7:4] of the immediate byte.
1527   unsigned I8RegNum = 0;
1528 
1529   uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
1530 
1531   if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
1532     BaseOpcode = 0x0F; // Weird 3DNow! encoding.
1533 
1534   unsigned OpcodeOffset = 0;
1535 
1536   bool IsND = X86II::hasNewDataDest(TSFlags);
1537 
1538   uint64_t Form = TSFlags & X86II::FormMask;
1539   switch (Form) {
1540   default:
1541     errs() << "FORM: " << Form << "\n";
1542     llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
1543   case X86II::Pseudo:
1544     llvm_unreachable("Pseudo instruction shouldn't be emitted");
1545   case X86II::RawFrmDstSrc:
1546   case X86II::RawFrmSrc:
1547   case X86II::RawFrmDst:
1548   case X86II::PrefixByte:
1549     emitByte(BaseOpcode, CB);
1550     break;
1551   case X86II::AddCCFrm: {
1552     // This will be added to the opcode in the fallthrough.
1553     OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
1554     assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
1555     --NumOps; // Drop the operand from the end.
1556     [[fallthrough]];
1557   case X86II::RawFrm:
1558     emitByte(BaseOpcode + OpcodeOffset, CB);
1559 
1560     if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII))
1561       break;
1562 
1563     const MCOperand &Op = MI.getOperand(CurOp++);
1564     emitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
1565                   MCFixupKind(X86::reloc_branch_4byte_pcrel), StartByte, CB,
1566                   Fixups);
1567     break;
1568   }
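  // Illustrative example (assumed): AddCCFrm folds the condition code into
  // the opcode, so a near "jne" (CC = 5) encodes as 0F 85 rel32 from the
  // base opcode 0F 80.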
1569   case X86II::RawFrmMemOffs:
1570     emitByte(BaseOpcode, CB);
1571     emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1572                   X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1573                   StartByte, CB, Fixups);
1574     ++CurOp; // skip segment operand
1575     break;
1576   case X86II::RawFrmImm8:
1577     emitByte(BaseOpcode, CB);
1578     emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1579                   X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1580                   StartByte, CB, Fixups);
1581     emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, StartByte,
1582                   CB, Fixups);
1583     break;
1584   case X86II::RawFrmImm16:
1585     emitByte(BaseOpcode, CB);
1586     emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1587                   X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1588                   StartByte, CB, Fixups);
1589     emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, StartByte,
1590                   CB, Fixups);
1591     break;
1592 
1593   case X86II::AddRegFrm:
1594     emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++)), CB);
1595     break;
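    // Illustrative example (assumed): AddRegFrm adds the register number to
    // the opcode, so "push %rbx" encodes as the single byte 0x53 (0x50 + 3).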
1596 
1597   case X86II::MRMDestReg: {
1598     emitByte(BaseOpcode, CB);
1599     unsigned SrcRegNum = CurOp + 1;
1600 
1601     if (HasEVEX_K) // Skip writemask
1602       ++SrcRegNum;
1603 
1604     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1605       ++SrcRegNum;
1606     if (IsND) // Skip the NDD operand encoded in EVEX_VVVV
1607       ++CurOp;
1608 
1609     emitRegModRMByte(MI.getOperand(CurOp),
1610                      getX86RegNum(MI.getOperand(SrcRegNum)), CB);
1611     CurOp = SrcRegNum + 1;
1612     break;
1613   }
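  // Illustrative example (assumed): "mov %eax, %ebx" is MRMDestReg; opcode
  // 89 is followed by ModRM 0xC3 (mod = 3, reg = EAX, rm = EBX), i.e. 89 C3.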
1614   case X86II::MRMDestMem4VOp3CC: {
1615     unsigned CC = MI.getOperand(8).getImm();
1616     emitByte(BaseOpcode + CC, CB);
1617     unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1618     emitMemModRMByte(MI, CurOp + 1, getX86RegNum(MI.getOperand(0)), TSFlags,
1619                      Kind, StartByte, CB, Fixups, STI, false);
1620     CurOp = SrcRegNum + 3; // Skip the register, VEX.VVVV operand and CC.
1621     break;
1622   }
1623   case X86II::MRMDestMemFSIB:
1624   case X86II::MRMDestMem: {
1625     emitByte(BaseOpcode, CB);
1626     unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1627 
1628     if (HasEVEX_K) // Skip writemask
1629       ++SrcRegNum;
1630 
1631     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1632       ++SrcRegNum;
1633 
1634     if (IsND) // Skip new data destination
1635       ++CurOp;
1636 
1637     bool ForceSIB = (Form == X86II::MRMDestMemFSIB);
1638     emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
1639                      Kind, StartByte, CB, Fixups, STI, ForceSIB);
1640     CurOp = SrcRegNum + 1;
1641     break;
1642   }
1643   case X86II::MRMSrcReg: {
1644     emitByte(BaseOpcode, CB);
1645     unsigned SrcRegNum = CurOp + 1;
1646 
1647     if (HasEVEX_K) // Skip writemask
1648       ++SrcRegNum;
1649 
1650     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1651       ++SrcRegNum;
1652 
1653     if (IsND) // Skip new data destination
1654       ++CurOp;
1655 
1656     emitRegModRMByte(MI.getOperand(SrcRegNum),
1657                      getX86RegNum(MI.getOperand(CurOp)), CB);
1658     CurOp = SrcRegNum + 1;
1659     if (HasVEX_I8Reg)
1660       I8RegNum = getX86RegEncoding(MI, CurOp++);
1661     // do not count the rounding control operand
1662     if (HasEVEX_RC)
1663       --NumOps;
1664     break;
1665   }
1666   case X86II::MRMSrcReg4VOp3: {
1667     emitByte(BaseOpcode, CB);
1668     unsigned SrcRegNum = CurOp + 1;
1669 
1670     emitRegModRMByte(MI.getOperand(SrcRegNum),
1671                      getX86RegNum(MI.getOperand(CurOp)), CB);
1672     CurOp = SrcRegNum + 1;
1673     ++CurOp; // Encoded in VEX.VVVV
1674     break;
1675   }
1676   case X86II::MRMSrcRegOp4: {
1677     emitByte(BaseOpcode, CB);
1678     unsigned SrcRegNum = CurOp + 1;
1679 
1680     // Skip 1st src (which is encoded in VEX_VVVV)
1681     ++SrcRegNum;
1682 
1683     // Capture 2nd src (which is encoded in Imm[7:4])
1684     assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
1685     I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
1686 
1687     emitRegModRMByte(MI.getOperand(SrcRegNum),
1688                      getX86RegNum(MI.getOperand(CurOp)), CB);
1689     CurOp = SrcRegNum + 1;
1690     break;
1691   }
1692   case X86II::MRMSrcRegCC: {
1693     unsigned FirstOp = CurOp++;
1694     unsigned SecondOp = CurOp++;
1695 
1696     unsigned CC = MI.getOperand(CurOp++).getImm();
1697     emitByte(BaseOpcode + CC, CB);
1698 
1699     emitRegModRMByte(MI.getOperand(SecondOp),
1700                      getX86RegNum(MI.getOperand(FirstOp)), CB);
1701     break;
1702   }
1703   case X86II::MRMSrcMemFSIB:
1704   case X86II::MRMSrcMem: {
1705     unsigned FirstMemOp = CurOp + 1;
1706 
1707     if (IsND) // Skip new data destination
1708       CurOp++;
1709 
1710     if (HasEVEX_K) // Skip writemask
1711       ++FirstMemOp;
1712 
1713     if (HasVEX_4V)
1714       ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1715 
1716     emitByte(BaseOpcode, CB);
1717 
1718     bool ForceSIB = (Form == X86II::MRMSrcMemFSIB);
1719     emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1720                      TSFlags, Kind, StartByte, CB, Fixups, STI, ForceSIB);
1721     CurOp = FirstMemOp + X86::AddrNumOperands;
1722     if (HasVEX_I8Reg)
1723       I8RegNum = getX86RegEncoding(MI, CurOp++);
1724     break;
1725   }
1726   case X86II::MRMSrcMem4VOp3: {
1727     unsigned FirstMemOp = CurOp + 1;
1728 
1729     emitByte(BaseOpcode, CB);
1730 
1731     emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1732                      TSFlags, Kind, StartByte, CB, Fixups, STI);
1733     CurOp = FirstMemOp + X86::AddrNumOperands;
1734     ++CurOp; // Encoded in VEX.VVVV.
1735     break;
1736   }
1737   case X86II::MRMSrcMemOp4: {
1738     unsigned FirstMemOp = CurOp + 1;
1739 
1740     ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1741 
1742     // Capture second register source (encoded in Imm[7:4])
1743     assert(HasVEX_I8Reg && "MRMSrcMemOp4 should imply VEX_I8Reg");
1744     I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
1745 
1746     emitByte(BaseOpcode, CB);
1747 
1748     emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
1749                      TSFlags, Kind, StartByte, CB, Fixups, STI);
1750     CurOp = FirstMemOp + X86::AddrNumOperands;
1751     break;
1752   }
1753   case X86II::MRMSrcMemCC: {
1754     unsigned RegOp = CurOp++;
1755     unsigned FirstMemOp = CurOp;
1756     CurOp = FirstMemOp + X86::AddrNumOperands;
1757 
1758     unsigned CC = MI.getOperand(CurOp++).getImm();
1759     emitByte(BaseOpcode + CC, CB);
1760 
1761     emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)),
1762                      TSFlags, Kind, StartByte, CB, Fixups, STI);
1763     break;
1764   }
1765 
1766   case X86II::MRMXrCC: {
1767     unsigned RegOp = CurOp++;
1768 
1769     unsigned CC = MI.getOperand(CurOp++).getImm();
1770     emitByte(BaseOpcode + CC, CB);
1771     emitRegModRMByte(MI.getOperand(RegOp), 0, CB);
1772     break;
1773   }
1774 
1775   case X86II::MRMXr:
1776   case X86II::MRM0r:
1777   case X86II::MRM1r:
1778   case X86II::MRM2r:
1779   case X86II::MRM3r:
1780   case X86II::MRM4r:
1781   case X86II::MRM5r:
1782   case X86II::MRM6r:
1783   case X86II::MRM7r:
1784     if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1785       ++CurOp;
1786     if (HasEVEX_K) // Skip writemask
1787       ++CurOp;
1788     emitByte(BaseOpcode, CB);
1789     emitRegModRMByte(MI.getOperand(CurOp++),
1790                      (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r, CB);
1791     break;
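    // Illustrative example (assumed): "notl %eax" uses MRM2r (F7 /2), so the
    // /2 extension goes in ModRM.reg and the result is F7 D0.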
1792   case X86II::MRMr0:
1793     emitByte(BaseOpcode, CB);
1794     emitByte(modRMByte(3, getX86RegNum(MI.getOperand(CurOp++)), 0), CB);
1795     break;
1796 
1797   case X86II::MRMXmCC: {
1798     unsigned FirstMemOp = CurOp;
1799     CurOp = FirstMemOp + X86::AddrNumOperands;
1800 
1801     unsigned CC = MI.getOperand(CurOp++).getImm();
1802     emitByte(BaseOpcode + CC, CB);
1803 
1804     emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Kind, StartByte, CB, Fixups,
1805                      STI);
1806     break;
1807   }
1808 
1809   case X86II::MRMXm:
1810   case X86II::MRM0m:
1811   case X86II::MRM1m:
1812   case X86II::MRM2m:
1813   case X86II::MRM3m:
1814   case X86II::MRM4m:
1815   case X86II::MRM5m:
1816   case X86II::MRM6m:
1817   case X86II::MRM7m:
1818     if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1819       ++CurOp;
1820     if (HasEVEX_K) // Skip writemask
1821       ++CurOp;
1822     emitByte(BaseOpcode, CB);
1823     emitMemModRMByte(MI, CurOp,
1824                      (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
1825                      Kind, StartByte, CB, Fixups, STI);
1826     CurOp += X86::AddrNumOperands;
1827     break;
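    // Illustrative example (assumed): "incl (%rax)" uses MRM0m (FF /0); the
    // /0 extension goes in ModRM.reg and the address in ModRM.rm, giving
    // FF 00.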
1828 
1829   case X86II::MRM0X:
1830   case X86II::MRM1X:
1831   case X86II::MRM2X:
1832   case X86II::MRM3X:
1833   case X86II::MRM4X:
1834   case X86II::MRM5X:
1835   case X86II::MRM6X:
1836   case X86II::MRM7X:
1837     emitByte(BaseOpcode, CB);
1838     emitByte(0xC0 + ((Form - X86II::MRM0X) << 3), CB);
1839     break;
1840 
1841   case X86II::MRM_C0:
1842   case X86II::MRM_C1:
1843   case X86II::MRM_C2:
1844   case X86II::MRM_C3:
1845   case X86II::MRM_C4:
1846   case X86II::MRM_C5:
1847   case X86II::MRM_C6:
1848   case X86II::MRM_C7:
1849   case X86II::MRM_C8:
1850   case X86II::MRM_C9:
1851   case X86II::MRM_CA:
1852   case X86II::MRM_CB:
1853   case X86II::MRM_CC:
1854   case X86II::MRM_CD:
1855   case X86II::MRM_CE:
1856   case X86II::MRM_CF:
1857   case X86II::MRM_D0:
1858   case X86II::MRM_D1:
1859   case X86II::MRM_D2:
1860   case X86II::MRM_D3:
1861   case X86II::MRM_D4:
1862   case X86II::MRM_D5:
1863   case X86II::MRM_D6:
1864   case X86II::MRM_D7:
1865   case X86II::MRM_D8:
1866   case X86II::MRM_D9:
1867   case X86II::MRM_DA:
1868   case X86II::MRM_DB:
1869   case X86II::MRM_DC:
1870   case X86II::MRM_DD:
1871   case X86II::MRM_DE:
1872   case X86II::MRM_DF:
1873   case X86II::MRM_E0:
1874   case X86II::MRM_E1:
1875   case X86II::MRM_E2:
1876   case X86II::MRM_E3:
1877   case X86II::MRM_E4:
1878   case X86II::MRM_E5:
1879   case X86II::MRM_E6:
1880   case X86II::MRM_E7:
1881   case X86II::MRM_E8:
1882   case X86II::MRM_E9:
1883   case X86II::MRM_EA:
1884   case X86II::MRM_EB:
1885   case X86II::MRM_EC:
1886   case X86II::MRM_ED:
1887   case X86II::MRM_EE:
1888   case X86II::MRM_EF:
1889   case X86II::MRM_F0:
1890   case X86II::MRM_F1:
1891   case X86II::MRM_F2:
1892   case X86II::MRM_F3:
1893   case X86II::MRM_F4:
1894   case X86II::MRM_F5:
1895   case X86II::MRM_F6:
1896   case X86II::MRM_F7:
1897   case X86II::MRM_F8:
1898   case X86II::MRM_F9:
1899   case X86II::MRM_FA:
1900   case X86II::MRM_FB:
1901   case X86II::MRM_FC:
1902   case X86II::MRM_FD:
1903   case X86II::MRM_FE:
1904   case X86II::MRM_FF:
1905     emitByte(BaseOpcode, CB);
1906     emitByte(0xC0 + Form - X86II::MRM_C0, CB);
1907     break;
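    // Illustrative example (assumed): these forms hard-code the entire ModRM
    // byte; "monitor" is MRM_C8 under opcode 0F 01 and encodes as 0F 01 C8.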
1908   }
1909 
1910   if (HasVEX_I8Reg) {
1911     // The last source register of a 4-operand AVX instruction is encoded in
1912     // bits [7:4] of an immediate byte.
1913     assert(I8RegNum < 16 && "Register encoding out of range");
1914     I8RegNum <<= 4;
1915     if (CurOp != NumOps) {
1916       unsigned Val = MI.getOperand(CurOp++).getImm();
1917       assert(Val < 16 && "Immediate operand value out of range");
1918       I8RegNum |= Val;
1919     }
1920     emitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
1921                   StartByte, CB, Fixups);
1922   } else {
1923     // If there is a remaining operand, it must be a trailing immediate. Emit it
1924     // according to the right size for the instruction. Some instructions
1925     // (SSE4a extrq and insertq) have two trailing immediates.
1926     while (CurOp != NumOps && NumOps - CurOp <= 2) {
1927       emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1928                     X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
1929                     StartByte, CB, Fixups);
1930     }
1931   }
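  // Illustrative example (assumed): for a VEX_I8Reg instruction such as
  // vblendvps, the extra XMM source register ends up in bits [7:4] of the
  // trailing immediate byte emitted above.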
1932 
1933   if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
1934     emitByte(X86II::getBaseOpcodeFor(TSFlags), CB);
1935 
1936   assert(CB.size() - StartByte <= 15 &&
1937          "The size of an instruction must be no longer than 15 bytes.");
1938 #ifndef NDEBUG
1939   // FIXME: Verify.
1940   if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
1941     errs() << "Cannot encode all operands of: ";
1942     MI.dump();
1943     errs() << '\n';
1944     abort();
1945   }
1946 #endif
1947 }
1948 
1949 MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
1950                                             MCContext &Ctx) {
1951   return new X86MCCodeEmitter(MCII, Ctx);
1952 }
1953