//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

namespace {

enum PrefixKind { None, REX, XOP, VEX2, VEX3, EVEX };

static void emitByte(uint8_t C, SmallVectorImpl<char> &CB) { CB.push_back(C); }

class X86OpcodePrefixHelper {
  // REX (1 byte)
  // +-----+ +------+
  // | 40h | | WRXB |
  // +-----+ +------+

  // XOP (3-byte)
  // +-----+ +--------------+ +-------------------+
  // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
  // +-----+ +--------------+ +-------------------+

  // VEX2 (2 bytes)
  // +-----+ +-------------------+
  // | C5h | | R | vvvv | L | pp |
  // +-----+ +-------------------+

  // VEX3 (3 bytes)
  // +-----+ +--------------+ +-------------------+
  // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
  // +-----+ +--------------+ +-------------------+

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX.R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.R=1 (64-bit mode only)

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in a SIB byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)

  // VEX_B:
  //  1: Same as REX.B=0 (ignored in 32-bit mode)
  //  0: Same as REX.B=1 (64-bit mode only)

  // VEX_W: opcode specific (used like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)

  // VEX_5M (VEX m-mmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100: Reserved for future use
  //  0b00101: VEX MAP5
  //  0b00110: VEX MAP6
  //  0b00111-0b11111: Reserved for future use
  //
  // In the XOP prefix the same field selects the XOP opcode map instead:
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2

  // EVEX (4 bytes)
  // +-----+ +--------------+ +-------------------+ +------------------------+
  // | 62h | | RXBR' | 0mmm | | W | vvvv | 1 | pp | | z | L'L | b | v' | aaa |
  // +-----+ +--------------+ +-------------------+ +------------------------+

  // EVEX_L2/VEX_L (Vector Length):
  // L2 L
  //  0 0: scalar or 128-bit vector
  //  0 1: 256-bit vector
  //  1 0: 512-bit vector
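
  // For illustration (an assumed example, not taken from the code below):
  // vaddps %xmm2, %xmm1, %xmm0 is VEX.128.0F.WIG 58 /r. All registers are
  // below xmm8, so R=0 and the 2-byte VEX form suffices:
  //   byte 0: C5
  //   byte 1: (~R)<<7 | ((~0b0001) & 0xf)<<3 | L<<2 | pp  =  F0
  // (vvvv holds src1 = xmm1 = 0001, inverted). The opcode 58 and ModRM C2
  // (mod=11, reg=xmm0, r/m=xmm2) follow, giving C5 F0 58 C2 overall.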

private:
  unsigned W : 1;
  unsigned R : 1;
  unsigned X : 1;
  unsigned B : 1;
  unsigned VEX_4V : 4;
  unsigned VEX_L : 1;
  unsigned VEX_PP : 2;
  unsigned VEX_5M : 5;
  unsigned EVEX_R2 : 1;
  unsigned EVEX_z : 1;
  unsigned EVEX_L2 : 1;
  unsigned EVEX_b : 1;
  unsigned EVEX_V2 : 1;
  unsigned EVEX_aaa : 3;
  PrefixKind Kind = None;
  const MCRegisterInfo &MRI;

  unsigned getRegEncoding(const MCInst &MI, unsigned OpNum) const {
    return MRI.getEncodingValue(MI.getOperand(OpNum).getReg());
  }

  void setR(unsigned Encoding) { R = Encoding >> 3 & 1; }
  void setR2(unsigned Encoding) { EVEX_R2 = Encoding >> 4 & 1; }
  void set4V(unsigned Encoding) { VEX_4V = Encoding & 0xf; }
  void setV2(unsigned Encoding) { EVEX_V2 = Encoding >> 4 & 1; }

public:
  void setW(bool V) { W = V; }
  void setR(const MCInst &MI, unsigned OpNum) {
    setR(getRegEncoding(MI, OpNum));
  }
  void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) {
    X = getRegEncoding(MI, OpNum) >> Shift & 1;
  }
  void setB(const MCInst &MI, unsigned OpNum) {
    B = getRegEncoding(MI, OpNum) >> 3 & 1;
  }
  void set4V(const MCInst &MI, unsigned OpNum) {
    set4V(getRegEncoding(MI, OpNum));
  }
  void setL(bool V) { VEX_L = V; }
  void setPP(unsigned V) { VEX_PP = V; }
  void set5M(unsigned V) { VEX_5M = V; }
  void setR2(const MCInst &MI, unsigned OpNum) {
    setR2(getRegEncoding(MI, OpNum));
  }
  void setRR2(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = getRegEncoding(MI, OpNum);
    setR(Encoding);
    setR2(Encoding);
  }
  void setZ(bool V) { EVEX_z = V; }
  void setL2(bool V) { EVEX_L2 = V; }
  void setEVEX_b(bool V) { EVEX_b = V; }
  void setV2(const MCInst &MI, unsigned OpNum) {
    setV2(getRegEncoding(MI, OpNum));
  }
  void set4VV2(const MCInst &MI, unsigned OpNum) {
    unsigned Encoding = getRegEncoding(MI, OpNum);
    set4V(Encoding);
    setV2(Encoding);
  }
  void setAAA(const MCInst &MI, unsigned OpNum) {
    EVEX_aaa = getRegEncoding(MI, OpNum);
  }

  X86OpcodePrefixHelper(const MCRegisterInfo &MRI)
      : W(0), R(0), X(0), B(0), VEX_4V(0), VEX_L(0), VEX_PP(0), VEX_5M(0),
        EVEX_R2(0), EVEX_z(0), EVEX_L2(0), EVEX_b(0), EVEX_V2(0), EVEX_aaa(0),
        MRI(MRI) {}

  void setLowerBound(PrefixKind K) { Kind = K; }

  PrefixKind determineOptimalKind() {
    switch (Kind) {
    case None:
      Kind = (W | R | X | B) ? REX : None;
      break;
    case REX:
    case XOP:
    case VEX3:
    case EVEX:
      break;
    case VEX2:
      Kind = (W | X | B | (VEX_5M != 1)) ? VEX3 : VEX2;
      break;
    }
    return Kind;
  }

  void emit(SmallVectorImpl<char> &CB) const {
    uint8_t FirstPayload =
        ((~R) & 0x1) << 7 | ((~X) & 0x1) << 6 | ((~B) & 0x1) << 5;
    uint8_t LastPayload = ((~VEX_4V) & 0xf) << 3 | VEX_L << 2 | VEX_PP;
    switch (Kind) {
    case None:
      return;
    case REX:
      emitByte(0x40 | W << 3 | R << 2 | X << 1 | B, CB);
      return;
    case VEX2:
      emitByte(0xC5, CB);
      emitByte(((~R) & 1) << 7 | LastPayload, CB);
      return;
    case VEX3:
    case XOP:
      emitByte(Kind == VEX3 ? 0xC4 : 0x8F, CB);
      emitByte(FirstPayload | VEX_5M, CB);
      emitByte(W << 7 | LastPayload, CB);
      return;
    case EVEX:
      assert(VEX_5M && !(VEX_5M & 0x8) && "invalid mmm fields for EVEX!");
      emitByte(0x62, CB);
      emitByte(FirstPayload | ((~EVEX_R2) & 0x1) << 4 | VEX_5M, CB);
      emitByte(W << 7 | ((~VEX_4V) & 0xf) << 3 | 1 << 2 | VEX_PP, CB);
      emitByte(EVEX_z << 7 | EVEX_L2 << 6 | VEX_L << 5 | EVEX_b << 4 |
                   ((~EVEX_V2) & 0x1) << 3 | EVEX_aaa,
               CB);
      return;
    }
  }
};

class X86MCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII;
  MCContext &Ctx;

public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : MCII(mcii), Ctx(ctx) {}
  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
  X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
  ~X86MCCodeEmitter() override = default;

  void emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB,
                  const MCSubtargetInfo &STI) const override;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

private:
  unsigned getX86RegNum(const MCOperand &MO) const;

  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const;

  void emitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize,
                     MCFixupKind FixupKind, uint64_t StartByte,
                     SmallVectorImpl<char> &CB,
                     SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const;

  void emitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        SmallVectorImpl<char> &CB) const;

  void emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   SmallVectorImpl<char> &CB) const;

  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
                        uint64_t TSFlags, PrefixKind Kind, uint64_t StartByte,
                        SmallVectorImpl<char> &CB,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI,
                        bool ForceSIB = false) const;

  PrefixKind emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
                            const MCSubtargetInfo &STI,
                            SmallVectorImpl<char> &CB) const;

  PrefixKind emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
                                 SmallVectorImpl<char> &CB) const;

  void emitSegmentOverridePrefix(unsigned SegOperand, const MCInst &MI,
                                 SmallVectorImpl<char> &CB) const;

  PrefixKind emitOpcodePrefix(int MemOperand, const MCInst &MI,
                              const MCSubtargetInfo &STI,
                              SmallVectorImpl<char> &CB) const;

  PrefixKind emitREXPrefix(int MemOperand, const MCInst &MI,
                           const MCSubtargetInfo &STI,
                           SmallVectorImpl<char> &CB) const;
};

} // end anonymous namespace

static uint8_t modRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
  assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
  return RM | (RegOpcode << 3) | (Mod << 6);
}
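
// For illustration: modRMByte(3, 2, 1) yields 0xD1, i.e. mod=11 (register
// direct), reg/opcode field 010 (opcode extension /2), r/m register 001.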

static void emitConstant(uint64_t Val, unsigned Size,
                         SmallVectorImpl<char> &CB) {
  // Output the constant in little endian byte order.
  for (unsigned i = 0; i != Size; ++i) {
    emitByte(Val & 255, CB);
    Val >>= 8;
  }
}
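
// For illustration: emitConstant(0x11223344, 4, CB) appends 44 33 22 11,
// least significant byte first.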

/// Determine if this immediate can fit in a disp8 or a compressed disp8 for
/// EVEX instructions. \p ImmOffset will be set to the value to pass to the
/// ImmOffset parameter of emitImmediate.
static bool isDispOrCDisp8(uint64_t TSFlags, int Value, int &ImmOffset) {
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  unsigned CD8_Scale =
      (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
  CD8_Scale = CD8_Scale ? 1U << (CD8_Scale - 1) : 0U;
  if (!HasEVEX || !CD8_Scale)
    return isInt<8>(Value);

  assert(isPowerOf2_32(CD8_Scale) && "Unexpected CD8 scale!");
  if (Value & (CD8_Scale - 1)) // Unaligned offset
    return false;

  int CDisp8 = Value / static_cast<int>(CD8_Scale);
  if (!isInt<8>(CDisp8))
    return false;

  // ImmOffset will be added to Value in emitImmediate leaving just CDisp8.
  ImmOffset = CDisp8 - Value;
  return true;
}
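
// For illustration: for an EVEX instruction with CD8_Scale = 64 (e.g. a full
// 512-bit memory operand), a displacement of 128 compresses to CDisp8 = 2 and
// ImmOffset = 2 - 128 = -126, so emitImmediate ends up emitting the single
// byte 0x02. A displacement of 100 is rejected (unaligned) and needs disp32.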

/// \returns the appropriate fixup kind to use for an immediate in an
/// instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default:
      llvm_unreachable("Unsupported signed fixup size!");
    case 4:
      return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}

enum GlobalOffsetTableExprKind { GOT_None, GOT_Normal, GOT_SymDiff };

/// Check if this expression starts with _GLOBAL_OFFSET_TABLE_ and if it is
/// of the form _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on
/// ELF i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only the simple
/// cases that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the
/// start of a binary expression.
static GlobalOffsetTableExprKind
startsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = nullptr;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}

static bool hasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}

static bool isPCRel32Branch(const MCInst &MI, const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4 &&
       Opcode != X86::JCC_4) ||
      getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
    return false;

  unsigned CurOp = X86II::getOperandBias(Desc);
  const MCOperand &Op = MI.getOperand(CurOp);
  if (!Op.isExpr())
    return false;

  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
  return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
}

unsigned X86MCCodeEmitter::getX86RegNum(const MCOperand &MO) const {
  return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
}

unsigned X86MCCodeEmitter::getX86RegEncoding(const MCInst &MI,
                                             unsigned OpNum) const {
  return Ctx.getRegisterInfo()->getEncodingValue(MI.getOperand(OpNum).getReg());
}

void X86MCCodeEmitter::emitImmediate(const MCOperand &DispOp, SMLoc Loc,
                                     unsigned Size, MCFixupKind FixupKind,
                                     uint64_t StartByte,
                                     SmallVectorImpl<char> &CB,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      emitConstant(DispOp.getImm() + ImmOffset, Size, CB);
      return;
    }
    Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // Adjust the fixup kind for expressions that need a special relocation,
  // such as GOT references and SECREL symbol references.
  if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = startsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }

      if (Kind == GOT_Normal)
        ImmOffset = static_cast<int>(CB.size() - StartByte);
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (hasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
      if (hasSecRelSymbolRef(Bin->getLHS()) ||
          hasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
      FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
    ImmOffset -= 4;
    // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
    // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
    // this needs to be a GOTPC32 relocation.
    if (startsWithGlobalOffsetTable(Expr) != GOT_None)
      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
  }
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup followed by Size zero bytes.
  Fixups.push_back(MCFixup::create(static_cast<uint32_t>(CB.size() - StartByte),
                                   Expr, FixupKind, Loc));
  emitConstant(0, Size, CB);
}

void X86MCCodeEmitter::emitRegModRMByte(const MCOperand &ModRMReg,
                                        unsigned RegOpcodeFld,
                                        SmallVectorImpl<char> &CB) const {
  emitByte(modRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)), CB);
}

void X86MCCodeEmitter::emitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                                   SmallVectorImpl<char> &CB) const {
  // SIB byte is in the same format as the modRMByte.
  emitByte(modRMByte(SS, Index, Base), CB);
}

void X86MCCodeEmitter::emitMemModRMByte(
    const MCInst &MI, unsigned Op, unsigned RegOpcodeField, uint64_t TSFlags,
    PrefixKind Kind, uint64_t StartByte, SmallVectorImpl<char> &CB,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI,
    bool ForceSIB) const {
  const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP ||
      BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
    assert(STI.hasFeature(X86::Is64Bit) &&
           "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && !ForceSIB &&
           "Invalid rip-relative address");
    emitByte(modRMByte(0, RegOpcodeField, 5), CB);

    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = [&]() {
      // Enable relaxed relocation only for a MCSymbolRefExpr.  We cannot use a
      // relaxed relocation if an offset is present (e.g. x@GOTPCREL+4).
      if (!(Disp.isExpr() && isa<MCSymbolRefExpr>(Disp.getExpr())))
        return X86::reloc_riprel_4byte;

      // Certain loads for GOT references can be relocated against the symbol
      // directly if the symbol ends up in the same linkage unit.
      switch (Opcode) {
      default:
        return X86::reloc_riprel_4byte;
      case X86::MOV64rm:
        // A movq load is a subset of reloc_riprel_4byte_relax_rex. It is a
        // special case because COFF and Mach-O don't support ELF's more
        // flexible R_X86_64_REX_GOTPCRELX relaxation.
        assert(Kind == REX);
        return X86::reloc_riprel_4byte_movq_load;
      case X86::ADC32rm:
      case X86::ADD32rm:
      case X86::AND32rm:
      case X86::CMP32rm:
      case X86::MOV32rm:
      case X86::OR32rm:
      case X86::SBB32rm:
      case X86::SUB32rm:
      case X86::TEST32mr:
      case X86::XOR32rm:
      case X86::CALL64m:
      case X86::JMP64m:
      case X86::TAILJMPm64:
      case X86::TEST64mr:
      case X86::ADC64rm:
      case X86::ADD64rm:
      case X86::AND64rm:
      case X86::CMP64rm:
      case X86::OR64rm:
      case X86::SBB64rm:
      case X86::SUB64rm:
      case X86::XOR64rm:
        return Kind == REX ? X86::reloc_riprel_4byte_relax_rex
                           : X86::reloc_riprel_4byte_relax;
      }
    }();

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the displacement field of the instruction with
    // the size of the immediate field. If we have this case, add it into the
    // expression to emit.
    // Note: rip-relative addressing using immediate displacement values should
    // not be adjusted, assuming it was the user's intent.
    int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
                      ? X86II::getSizeOfImm(TSFlags)
                      : 0;
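
    // For illustration: in "cmpl $imm32, sym(%rip)" the 4-byte displacement
    // is followed by a 4-byte immediate, so ImmSize is 4 here; together with
    // the -4 pc-relative bias applied in emitImmediate, the stored value is
    // relative to the end of the instruction, as the hardware expects.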

    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB,
                  Fixups, -ImmSize);
    return;
  }

  unsigned BaseRegNo = BaseReg ? getX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (X86_MC::is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And getX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] maps the normal RegNo to the row values from Table 2-1
      // for 16-bit addressing modes; a value of zero means disallowed.
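      //
      // For illustration: for [BX+SI], BX maps to 7 and SI to 4 below; SI has
      // bit 1 clear, so RMfield becomes (4 & 1) | ((7 - 7) << 1) = 0, the
      // Table 2-1 row for BX+SI.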
      static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5};
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[getX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isInt<8>(Disp.getImm())) {
        if (Disp.getImm() == 0 && RMfield != 6) {
          // There is no displacement; just the register.
          emitByte(modRMByte(0, RegOpcodeField, RMfield), CB);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
        emitByte(modRMByte(1, RegOpcodeField, RMfield), CB);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      emitByte(modRMByte(2, RegOpcodeField, RMfield), CB);
    } else {
      assert(IndexReg.getReg() == 0 && "Unexpected index register!");
      // There is no BaseReg; this is the plain [disp16] case.
      emitByte(modRMByte(0, RegOpcodeField, 6), CB);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    emitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, StartByte, CB, Fixups);
    return;
  }

  // Check for presence of {disp8} or {disp32} pseudo prefixes.
  bool UseDisp8 = MI.getFlags() & X86::IP_USE_DISP8;
  bool UseDisp32 = MI.getFlags() & X86::IP_USE_DISP32;

  // We only allow no displacement if no pseudo prefix is present.
  bool AllowNoDisp = !UseDisp8 && !UseDisp32;
  // Disp8 is allowed unless the {disp32} prefix is present.
  bool AllowDisp8 = !UseDisp32;

  // Determine whether a SIB byte is needed.
  if (// The SIB byte must be used if there is an index register or the
      // encoding requires a SIB byte.
      !ForceSIB && IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // present.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
      (!STI.hasFeature(X86::Is64Bit) || BaseReg != 0)) {

    if (BaseReg == 0) { // [disp32]     in X86-32 mode
      emitByte(modRMByte(0, RegOpcodeField, 5), CB);
      emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, CB, Fixups);
      return;
    }

    // If the base is not EBP/ESP/R12/R13 and there is no displacement, use
    // the simple indirect register encoding; this handles addresses like
    // [EAX]. The encoding for [EBP] or [R13] with no displacement means
    // [disp32], so we handle it by emitting a displacement of 0 later.
    if (BaseRegNo != N86::EBP) {
      if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp) {
        emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB);
        return;
      }

      // If the displacement is @tlscall, treat it as a zero.
      if (Disp.isExpr()) {
        auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
        if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
          // This is exclusively used by call *a@tlscall(base). The relocation
          // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
          Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
          emitByte(modRMByte(0, RegOpcodeField, BaseRegNo), CB);
          return;
        }
      }
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8],
    // including a compressed disp8 for EVEX instructions that support it.
    // This also handles the 0 displacement for [EBP] or [R13]. We can't use
    // disp8 if the {disp32} pseudo prefix is present.
    if (Disp.isImm() && AllowDisp8) {
      int ImmOffset = 0;
      if (isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
        emitByte(modRMByte(1, RegOpcodeField, BaseRegNo), CB);
        emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups,
                      ImmOffset);
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32].
    // Displacement may be 0 for [EBP] or [R13] case if {disp32} pseudo prefix
    // prevented using disp8 above.
    emitByte(modRMByte(2, RegOpcodeField, BaseRegNo), CB);
    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
                                                : X86::reloc_signed_4byte;
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), StartByte, CB,
                  Fixups);
    return;
  }

  // We need a SIB byte, so start by outputting the ModR/M byte first.
  assert(IndexReg.getReg() != X86::ESP && IndexReg.getReg() != X86::RSP &&
         "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8 = false;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    BaseRegNo = 5;
    emitByte(modRMByte(0, RegOpcodeField, 4), CB);
    ForceDisp32 = true;
  } else if (Disp.isImm() && Disp.getImm() == 0 && AllowNoDisp &&
             // Base reg can't be EBP/RBP/R13 as that would end up with '5' as
             // the base field, but that is the magic [*] nomenclature that
             // indicates no base when mod=0. For these cases we'll emit a 0
             // displacement instead.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte
    emitByte(modRMByte(0, RegOpcodeField, 4), CB);
  } else if (Disp.isImm() && AllowDisp8 &&
             isDispOrCDisp8(TSFlags, Disp.getImm(), ImmOffset)) {
    // Displacement fits in a byte or matches an EVEX compressed disp8, use
    // disp8 encoding. This also handles EBP/R13 base with 0 displacement unless
    // {disp32} pseudo prefix was used.
    emitByte(modRMByte(1, RegOpcodeField, 4), CB);
    ForceDisp8 = true;
  } else {
    // Otherwise, emit the normal disp32 encoding.
    emitByte(modRMByte(2, RegOpcodeField, 4), CB);
    ForceDisp32 = true;
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3};
  unsigned SS = SSTable[Scale.getImm()];
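
  // For illustration: [rbx + rsi*8 + 0x40] takes this SIB path. Scale 8 gives
  // SS=3; rsi encodes as 6 and rbx as 3, so the SIB byte is
  // (3 << 6) | (6 << 3) | 3 = 0xF3, following a ModRM with mod=01, r/m=100
  // and a trailing disp8 of 0x40.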

  unsigned IndexRegNo = IndexReg.getReg() ? getX86RegNum(IndexReg) : 4;

  emitSIBByte(SS, IndexRegNo, BaseRegNo, CB);

  // Do we need to output a displacement?
  if (ForceDisp8)
    emitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, StartByte, CB, Fixups,
                  ImmOffset);
  else if (ForceDisp32)
    emitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  StartByte, CB, Fixups);
}

/// Emit all instruction prefixes.
///
/// \returns one of REX, XOP, VEX2, VEX3, or EVEX if any of them is used,
/// otherwise returns None.
PrefixKind X86MCCodeEmitter::emitPrefixImpl(unsigned &CurOp, const MCInst &MI,
                                            const MCSubtargetInfo &STI,
                                            SmallVectorImpl<char> &CB) const {
  uint64_t TSFlags = MCII.get(MI.getOpcode()).TSFlags;
  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  // Emit segment override opcode prefix as needed.
  if (MemoryOperand != -1) {
    MemoryOperand += CurOp;
    emitSegmentOverridePrefix(MemoryOperand + X86::AddrSegmentReg, MI, CB);
  }

  // Emit the repeat opcode prefix as needed.
  unsigned Flags = MI.getFlags();
  if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
    emitByte(0xF3, CB);
  if (Flags & X86::IP_HAS_REPEAT_NE)
    emitByte(0xF2, CB);

  // Emit the address size opcode prefix as needed.
  if (X86_MC::needsAddressSizeOverride(MI, STI, MemoryOperand, TSFlags) ||
      Flags & X86::IP_HAS_AD_SIZE)
    emitByte(0x67, CB);

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(2).getReg() != X86::DS)
      emitSegmentOverridePrefix(2, MI, CB);
    CurOp += 3; // Consume operands.
    break;
  }
  case X86II::RawFrmSrc: {
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(1).getReg() != X86::DS)
      emitSegmentOverridePrefix(1, MI, CB);
    CurOp += 2; // Consume operands.
    break;
  }
  case X86II::RawFrmDst: {
    ++CurOp; // Consume operand.
    break;
  }
  case X86II::RawFrmMemOffs: {
    // Emit segment override opcode prefix as needed.
    emitSegmentOverridePrefix(1, MI, CB);
    break;
  }
  }

  // The REX prefix is optional, but if used it must come immediately before
  // the opcode. Dispatch on the encoding type of this instruction.
  return (TSFlags & X86II::EncodingMask)
             ? emitVEXOpcodePrefix(MemoryOperand, MI, CB)
             : emitOpcodePrefix(MemoryOperand, MI, STI, CB);
}

// AVX instructions are encoded using an encoding scheme that combines
// prefix bytes, opcode extension field, operand encoding fields, and vector
// length encoding capability into a new prefix, referred to as VEX.

// The majority of the AVX-512 family of instructions (operating on
// 512/256/128-bit vector register operands) are encoded using a new prefix
// (called EVEX).

// XOP is a revised subset of what was originally intended as SSE5. It was
// changed to be similar but not overlapping with AVX.

/// Emit XOP, VEX2, VEX3 or EVEX prefix.
/// \returns the used prefix.
PrefixKind
X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
                                      SmallVectorImpl<char> &CB) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");

  X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
  switch (TSFlags & X86II::EncodingMask) {
  default:
    break;
  case X86II::XOP:
    Prefix.setLowerBound(XOP);
    break;
  case X86II::VEX:
    // VEX can be 2-byte or 3-byte; which form is used is not determined yet
    // unless the 3-byte form was explicitly requested.
    Prefix.setLowerBound(MI.getFlags() & X86::IP_USE_VEX3 ? VEX3 : VEX2);
    break;
  case X86II::EVEX:
    Prefix.setLowerBound(EVEX);
    break;
  }

  Prefix.setW(TSFlags & X86II::REX_W);

  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  switch (TSFlags & X86II::OpMapMask) {
  default:
    llvm_unreachable("Invalid prefix!");
  case X86II::TB:
    Prefix.set5M(0x1); // 0F
    break;
  case X86II::T8:
    Prefix.set5M(0x2); // 0F 38
    break;
  case X86II::TA:
    Prefix.set5M(0x3); // 0F 3A
    break;
  case X86II::XOP8:
    Prefix.set5M(0x8);
    break;
  case X86II::XOP9:
    Prefix.set5M(0x9);
    break;
  case X86II::XOPA:
    Prefix.set5M(0xA);
    break;
  case X86II::T_MAP5:
    Prefix.set5M(0x5);
    break;
  case X86II::T_MAP6:
    Prefix.set5M(0x6);
    break;
  }

  Prefix.setL(TSFlags & X86II::VEX_L);
  Prefix.setL2(TSFlags & X86II::EVEX_L2);
  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD:
    Prefix.setPP(0x1); // 66
    break;
  case X86II::XS:
    Prefix.setPP(0x2); // F3
    break;
  case X86II::XD:
    Prefix.setPP(0x3); // F2
    break;
  }

  Prefix.setZ(HasEVEX_K && (TSFlags & X86II::EVEX_Z));
  Prefix.setEVEX_b(TSFlags & X86II::EVEX_B);

  bool EncodeRC = false;
  uint8_t EVEX_rc = 0;
  unsigned CurOp = X86II::getOperandBias(Desc);

  switch (TSFlags & X86II::FormMask) {
  default:
    llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!");
  case X86II::MRMDestMem4VOp3CC: {
    //  MemAddr, src1(ModR/M), src2(VEX_4V)
    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    Prefix.setR(MI, ++CurOp);
    Prefix.set4V(MI, CurOp++);
    break;
  }
  case X86II::MRM_C0:
  case X86II::RawFrm:
    break;
  case X86II::MRMDestMemFSIB:
  case X86II::MRMDestMem: {
    // MRMDestMem instruction forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setRR2(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcMemFSIB:
  case X86II::MRMSrcMem: {
    // MRMSrcMem instruction forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(Imm[7:4])
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    Prefix.setRR2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);

    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    // Instruction format for 4VOp3:
    //   src1(ModR/M), MemAddr, src3(VEX_4V)
    Prefix.setR(MI, CurOp++);
    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    Prefix.set4V(MI, CurOp + X86::AddrNumOperands);
    break;
  }
  case X86II::MRMSrcMemOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    Prefix.setR(MI, CurOp++);
    Prefix.set4V(MI, CurOp++);
    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    break;
  }
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m: {
    // MRM[0-9]m instruction forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      Prefix.setV2(MI, MemOperand + X86::AddrIndexReg);

    break;
  }
  case X86II::MRMSrcReg: {
    // MRMSrcReg instruction forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    Prefix.setRR2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setB(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;

    if (TSFlags & X86II::EVEX_B) {
      if (HasEVEX_RC) {
        unsigned NumOps = Desc.getNumOperands();
        unsigned RcOperand = NumOps - 1;
        assert(RcOperand >= CurOp);
        EVEX_rc = MI.getOperand(RcOperand).getImm();
        assert(EVEX_rc <= 3 && "Invalid rounding control!");
      }
      EncodeRC = true;
    }
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    // Instruction format for 4VOp3:
    //   src1(ModR/M), src2(ModR/M), src3(VEX_4V)
    Prefix.setR(MI, CurOp++);
    Prefix.setB(MI, CurOp++);
    Prefix.set4V(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcRegOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    Prefix.setR(MI, CurOp++);
    Prefix.set4V(MI, CurOp++);
    // Skip second register source (encoded in Imm[7:4])
    ++CurOp;

    Prefix.setB(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;
    break;
  }
  case X86II::MRMDestReg: {
    // MRMDestReg instruction forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    Prefix.setB(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    Prefix.setRR2(MI, CurOp++);
    if (TSFlags & X86II::EVEX_B)
      EncodeRC = true;
    break;
  }
  case X86II::MRMr0: {
    // MRMr0 instruction forms:
    //  11:rrr:000
    //  dst(ModR/M)
    Prefix.setRR2(MI, CurOp++);
    break;
  }
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r: {
    // MRM0r-MRM7r instruction forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V)
      Prefix.set4VV2(MI, CurOp++);

    if (HasEVEX_K)
      Prefix.setAAA(MI, CurOp++);

    Prefix.setB(MI, CurOp);
    Prefix.setX(MI, CurOp, 4);
    ++CurOp;
    break;
  }
  }
  if (EncodeRC) {
    Prefix.setL(EVEX_rc & 0x1);
    Prefix.setL2(EVEX_rc & 0x2);
  }
  PrefixKind Kind = Prefix.determineOptimalKind();
  Prefix.emit(CB);
  return Kind;
}

/// Emit REX prefix which specifies
///   1) 64-bit instructions,
///   2) non-default operand size, and
///   3) use of X86-64 extended registers.
///
/// \returns the used prefix (REX or None).
PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI,
                                           const MCSubtargetInfo &STI,
                                           SmallVectorImpl<char> &CB) const {
  if (!STI.hasFeature(X86::Is64Bit))
    return None;
  X86OpcodePrefixHelper Prefix(*Ctx.getRegisterInfo());
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;
  Prefix.setW(TSFlags & X86II::REX_W);
  unsigned NumOps = MI.getNumOperands();
  bool UsesHighByteReg = false;
#ifndef NDEBUG
  bool HasRegOp = false;
#endif
  unsigned CurOp = NumOps ? X86II::getOperandBias(Desc) : 0;
  for (unsigned i = CurOp; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (MO.isReg()) {
#ifndef NDEBUG
      HasRegOp = true;
#endif
      unsigned Reg = MO.getReg();
      if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
        UsesHighByteReg = true;
      // If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix.
      if (X86II::isX86_64NonExtLowByteReg(Reg))
        Prefix.setLowerBound(REX);
    } else if (MO.isExpr() && STI.getTargetTriple().isX32()) {
      // GOTTPOFF and TLSDESC relocations require a REX prefix to allow
      // linker optimizations: even if the instructions we see may not require
      // any prefix, they may be replaced by instructions that do. This is
      // handled as a special case here so that it also works for hand-written
      // assembly without the user needing to write REX, as with GNU as.
      const auto *Ref = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
      if (Ref && (Ref->getKind() == MCSymbolRefExpr::VK_GOTTPOFF ||
                  Ref->getKind() == MCSymbolRefExpr::VK_TLSDESC)) {
        Prefix.setLowerBound(REX);
      }
    }
  }
  switch (TSFlags & X86II::FormMask) {
  default:
    assert(!HasRegOp && "Unexpected form in emitREXPrefix!");
    break;
  case X86II::RawFrm:
  case X86II::RawFrmMemOffs:
  case X86II::RawFrmSrc:
  case X86II::RawFrmDst:
  case X86II::RawFrmDstSrc:
    break;
  case X86II::AddRegFrm:
    Prefix.setB(MI, CurOp++);
    break;
  case X86II::MRMSrcReg:
  case X86II::MRMSrcRegCC:
    Prefix.setR(MI, CurOp++);
    Prefix.setB(MI, CurOp++);
    break;
  case X86II::MRMSrcMem:
  case X86II::MRMSrcMemCC:
    Prefix.setR(MI, CurOp++);
    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    break;
  case X86II::MRMDestReg:
    Prefix.setB(MI, CurOp++);
    Prefix.setR(MI, CurOp++);
    break;
  case X86II::MRMDestMem:
    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    CurOp += X86::AddrNumOperands;
    Prefix.setR(MI, CurOp++);
    break;
  case X86II::MRMXmCC:
  case X86II::MRMXm:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m:
    Prefix.setB(MI, MemOperand + X86::AddrBaseReg);
    Prefix.setX(MI, MemOperand + X86::AddrIndexReg);
    break;
  case X86II::MRMXrCC:
  case X86II::MRMXr:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r:
    Prefix.setB(MI, CurOp++);
    break;
  }
  PrefixKind Kind = Prefix.determineOptimalKind();
  if (Kind && UsesHighByteReg)
    report_fatal_error(
        "Cannot encode high byte register in REX-prefixed instruction");
  Prefix.emit(CB);
  return Kind;
}

/// Emit segment override opcode prefix as needed.
void X86MCCodeEmitter::emitSegmentOverridePrefix(
    unsigned SegOperand, const MCInst &MI, SmallVectorImpl<char> &CB) const {
  // Check for explicit segment override on memory operand.
  if (unsigned Reg = MI.getOperand(SegOperand).getReg())
    emitByte(X86::getSegmentOverridePrefixForReg(Reg), CB);
}

/// Emit all instruction prefixes prior to the opcode.
///
/// \param MemOperand the operand # of the start of a memory operand if present.
/// If not present, it is -1.
///
/// \returns the used prefix (REX or None).
PrefixKind X86MCCodeEmitter::emitOpcodePrefix(int MemOperand, const MCInst &MI,
                                              const MCSubtargetInfo &STI,
                                              SmallVectorImpl<char> &CB) const {
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  uint64_t TSFlags = Desc.TSFlags;

  // Emit the operand size opcode prefix as needed.
  if ((TSFlags & X86II::OpSizeMask) ==
      (STI.hasFeature(X86::Is16Bit) ? X86II::OpSize32 : X86II::OpSize16))
    emitByte(0x66, CB);
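
  // For illustration: in 32-bit and 64-bit modes the 0x66 prefix selects the
  // 16-bit operand size (e.g. for a 16-bit add), while in 16-bit mode the
  // same prefix selects the 32-bit operand size instead.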

  // Emit the LOCK opcode prefix.
  if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
    emitByte(0xF0, CB);

  // Emit the NOTRACK opcode prefix.
  if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
    emitByte(0x3E, CB);

  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: // 66
    emitByte(0x66, CB);
    break;
  case X86II::XS: // F3
    emitByte(0xF3, CB);
    break;
  case X86II::XD: // F2
    emitByte(0xF2, CB);
    break;
  }

  // Handle REX prefix.
  assert((STI.hasFeature(X86::Is64Bit) || !(TSFlags & X86II::REX_W)) &&
         "REX.W requires 64bit mode.");
  PrefixKind Kind = emitREXPrefix(MemOperand, MI, STI, CB);

  // 0x0F escape code must be emitted just before the opcode.
  switch (TSFlags & X86II::OpMapMask) {
  case X86II::TB:        // Two-byte opcode map
  case X86II::T8:        // 0F 38
  case X86II::TA:        // 0F 3A
  case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
    emitByte(0x0F, CB);
    break;
  }

  switch (TSFlags & X86II::OpMapMask) {
  case X86II::T8: // 0F 38
    emitByte(0x38, CB);
    break;
  case X86II::TA: // 0F 3A
    emitByte(0x3A, CB);
    break;
  }

  return Kind;
}

void X86MCCodeEmitter::emitPrefix(const MCInst &MI, SmallVectorImpl<char> &CB,
                                  const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if (X86II::isPseudo(TSFlags))
    return;

  unsigned CurOp = X86II::getOperandBias(Desc);

  emitPrefixImpl(CurOp, MI, STI, CB);
}

void X86MCCodeEmitter::encodeInstruction(const MCInst &MI,
                                         SmallVectorImpl<char> &CB,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if (X86II::isPseudo(TSFlags))
    return;

  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  uint64_t StartByte = CB.size();

  PrefixKind Kind = emitPrefixImpl(CurOp, MI, STI, CB);

  // Does this instruction use the VEX.VVVV field?
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;

  // Does this instruction use the EVEX.aaa field?
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // Used if a register is encoded in 7:4 of immediate.
  unsigned I8RegNum = 0;

  uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    BaseOpcode = 0x0F; // Weird 3DNow! encoding.
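
  // For illustration: 3DNow! instructions are encoded as 0F 0F <ModRM> with
  // the actual opcode byte trailing the instruction like an imm8, so the
  // base opcode here becomes the second 0F escape byte.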

  unsigned OpcodeOffset = 0;

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    errs() << "FORM: " << Form << "\n";
    llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
  case X86II::RawFrmDstSrc:
  case X86II::RawFrmSrc:
  case X86II::RawFrmDst:
  case X86II::PrefixByte:
    emitByte(BaseOpcode, CB);
    break;
  case X86II::AddCCFrm: {
    // This will be added to the opcode in the fallthrough.
    OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
    assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
    --NumOps; // Drop the operand from the end.
    [[fallthrough]];
  case X86II::RawFrm:
    emitByte(BaseOpcode + OpcodeOffset, CB);

    if (!STI.hasFeature(X86::Is64Bit) || !isPCRel32Branch(MI, MCII))
      break;

    const MCOperand &Op = MI.getOperand(CurOp++);
    emitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
                  MCFixupKind(X86::reloc_branch_4byte_pcrel), StartByte, CB,
                  Fixups);
    break;
  }
  case X86II::RawFrmMemOffs:
    emitByte(BaseOpcode, CB);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, CB, Fixups);
    ++CurOp; // skip segment operand
    break;
  case X86II::RawFrmImm8:
    emitByte(BaseOpcode, CB);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, CB, Fixups);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, StartByte,
                  CB, Fixups);
    break;
  case X86II::RawFrmImm16:
    emitByte(BaseOpcode, CB);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  StartByte, CB, Fixups);
    emitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, StartByte,
                  CB, Fixups);
    break;

  case X86II::AddRegFrm:
    emitByte(BaseOpcode + getX86RegNum(MI.getOperand(CurOp++)), CB);
    break;

  case X86II::MRMDestReg: {
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    emitRegModRMByte(MI.getOperand(CurOp),
                     getX86RegNum(MI.getOperand(SrcRegNum)), CB);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMDestMem4VOp3CC: {
    unsigned CC = MI.getOperand(8).getImm();
    emitByte(BaseOpcode + CC, CB);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
    emitMemModRMByte(MI, CurOp + 1, getX86RegNum(MI.getOperand(0)), TSFlags,
                     Kind, StartByte, CB, Fixups, STI, false);
    CurOp = SrcRegNum + 3; // skip reg, VEX_4V and CC
1475     break;
1476   }
1477   case X86II::MRMDestMemFSIB:
1478   case X86II::MRMDestMem: {
1479     emitByte(BaseOpcode, CB);
1480     unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1481 
1482     if (HasEVEX_K) // Skip writemask
1483       ++SrcRegNum;
1484 
1485     if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1486       ++SrcRegNum;
1487 
1488     bool ForceSIB = (Form == X86II::MRMDestMemFSIB);
1489     emitMemModRMByte(MI, CurOp, getX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
1490                      Kind, StartByte, CB, Fixups, STI, ForceSIB);
1491     CurOp = SrcRegNum + 1;
1492     break;
1493   }
1494   case X86II::MRMSrcReg: {
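    // Register form with the source in ModRM.rm, e.g. mov eax, ecx is
    // 8B /r: opcode 0x8B, then ModRM 0xC1 (mod=0b11, reg=EAX, r/m=ECX).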
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CB);
    CurOp = SrcRegNum + 1;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    // Do not count the rounding control operand.
    if (HasEVEX_RC)
      --NumOps;
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + 1;

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CB);
    CurOp = SrcRegNum + 1;
    ++CurOp; // Encoded in VEX.VVVV.
    break;
  }
  case X86II::MRMSrcRegOp4: {
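    // Four-operand /is4 form (AMD FMA4/XOP): the second source register is
    // carried in bits [7:4] of a trailing immediate byte rather than in
    // ModRM or VEX.vvvv.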
    emitByte(BaseOpcode, CB);
    unsigned SrcRegNum = CurOp + 1;

    // Skip 1st src (which is encoded in VEX_VVVV)
    ++SrcRegNum;

    // Capture 2nd src (which is encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, SrcRegNum++);

    emitRegModRMByte(MI.getOperand(SrcRegNum),
                     getX86RegNum(MI.getOperand(CurOp)), CB);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcRegCC: {
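    // The condition code is folded into the opcode, e.g. CMOVcc r32, r/m32
    // is 0F 40+cc /r: cmove eax, ecx => 0F 44 C1.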
    unsigned FirstOp = CurOp++;
    unsigned SecondOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);

    emitRegModRMByte(MI.getOperand(SecondOp),
                     getX86RegNum(MI.getOperand(FirstOp)), CB);
    break;
  }
  case X86II::MRMSrcMemFSIB:
  case X86II::MRMSrcMem: {
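    // Memory source in ModRM.rm, e.g. mov eax, [rbx] is 8B /r with ModRM
    // 0x03 (mod=0b00, reg=EAX, r/m=RBX); other addressing modes add a SIB
    // byte and/or displacement after the ModRM byte.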
    unsigned FirstMemOp = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++FirstMemOp;

    if (HasVEX_4V)
      ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    emitByte(BaseOpcode, CB);

    bool ForceSIB = (Form == X86II::MRMSrcMemFSIB);
    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Kind, StartByte, CB, Fixups, STI, ForceSIB);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    unsigned FirstMemOp = CurOp + 1;

    emitByte(BaseOpcode, CB);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Kind, StartByte, CB, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    ++CurOp; // Encoded in VEX.VVVV.
    break;
  }
  case X86II::MRMSrcMemOp4: {
    unsigned FirstMemOp = CurOp + 1;

    ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    // Capture second register source (encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcMemOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, FirstMemOp++);

    emitByte(BaseOpcode, CB);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Kind, StartByte, CB, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    break;
  }
  case X86II::MRMSrcMemCC: {
    unsigned RegOp = CurOp++;
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);

    emitMemModRMByte(MI, FirstMemOp, getX86RegNum(MI.getOperand(RegOp)),
                     TSFlags, Kind, StartByte, CB, Fixups, STI);
    break;
  }

  case X86II::MRMXrCC: {
    unsigned RegOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);
    emitRegModRMByte(MI.getOperand(RegOp), 0, CB);
    break;
  }

  case X86II::MRMXr:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r:
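    // The ModRM.reg field is a /digit opcode extension rather than a
    // register, e.g. NOT r/m32 is F7 /2: not eax => F7 D0.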
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    emitByte(BaseOpcode, CB);
    emitRegModRMByte(MI.getOperand(CurOp++),
                     (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r, CB);
    break;
  case X86II::MRMr0:
    emitByte(BaseOpcode, CB);
    emitByte(modRMByte(3, getX86RegNum(MI.getOperand(CurOp++)), 0), CB);
    break;

  case X86II::MRMXmCC: {
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    emitByte(BaseOpcode + CC, CB);

    emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Kind, StartByte, CB, Fixups,
                     STI);
    break;
  }

  case X86II::MRMXm:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    emitByte(BaseOpcode, CB);
    emitMemModRMByte(MI, CurOp,
                     (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
                     Kind, StartByte, CB, Fixups, STI);
    CurOp += X86::AddrNumOperands;
    break;

  case X86II::MRM0X:
  case X86II::MRM1X:
  case X86II::MRM2X:
  case X86II::MRM3X:
  case X86II::MRM4X:
  case X86II::MRM5X:
  case X86II::MRM6X:
  case X86II::MRM7X:
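    // No register or memory operand at all: emit a fixed ModRM byte with
    // mod=0b11, reg=/digit, r/m=0, i.e. 0xC0 | digit << 3 (e.g. /5 => 0xE8).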
    emitByte(BaseOpcode, CB);
    emitByte(0xC0 + ((Form - X86II::MRM0X) << 3), CB);
    break;

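  // Forms with a completely fixed ModRM byte (0xC0-0xFF), used by
  // operand-less instructions, e.g. MONITOR is 0F 01 C8 (form MRM_C8).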
  case X86II::MRM_C0:
  case X86II::MRM_C1:
  case X86II::MRM_C2:
  case X86II::MRM_C3:
  case X86II::MRM_C4:
  case X86II::MRM_C5:
  case X86II::MRM_C6:
  case X86II::MRM_C7:
  case X86II::MRM_C8:
  case X86II::MRM_C9:
  case X86II::MRM_CA:
  case X86II::MRM_CB:
  case X86II::MRM_CC:
  case X86II::MRM_CD:
  case X86II::MRM_CE:
  case X86II::MRM_CF:
  case X86II::MRM_D0:
  case X86II::MRM_D1:
  case X86II::MRM_D2:
  case X86II::MRM_D3:
  case X86II::MRM_D4:
  case X86II::MRM_D5:
  case X86II::MRM_D6:
  case X86II::MRM_D7:
  case X86II::MRM_D8:
  case X86II::MRM_D9:
  case X86II::MRM_DA:
  case X86II::MRM_DB:
  case X86II::MRM_DC:
  case X86II::MRM_DD:
  case X86II::MRM_DE:
  case X86II::MRM_DF:
  case X86II::MRM_E0:
  case X86II::MRM_E1:
  case X86II::MRM_E2:
  case X86II::MRM_E3:
  case X86II::MRM_E4:
  case X86II::MRM_E5:
  case X86II::MRM_E6:
  case X86II::MRM_E7:
  case X86II::MRM_E8:
  case X86II::MRM_E9:
  case X86II::MRM_EA:
  case X86II::MRM_EB:
  case X86II::MRM_EC:
  case X86II::MRM_ED:
  case X86II::MRM_EE:
  case X86II::MRM_EF:
  case X86II::MRM_F0:
  case X86II::MRM_F1:
  case X86II::MRM_F2:
  case X86II::MRM_F3:
  case X86II::MRM_F4:
  case X86II::MRM_F5:
  case X86II::MRM_F6:
  case X86II::MRM_F7:
  case X86II::MRM_F8:
  case X86II::MRM_F9:
  case X86II::MRM_FA:
  case X86II::MRM_FB:
  case X86II::MRM_FC:
  case X86II::MRM_FD:
  case X86II::MRM_FE:
  case X86II::MRM_FF:
    emitByte(BaseOpcode, CB);
    emitByte(0xC0 + Form - X86II::MRM_C0, CB);
    break;
  }

  if (HasVEX_I8Reg) {
    // The last source register of a 4-operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte.
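    // E.g. for VBLENDVPS xmm1, xmm2, xmm3, xmm4 (/is4), XMM4 contributes
    // 0x40 to the trailing immediate byte.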
    assert(I8RegNum < 16 && "Register encoding out of range");
    I8RegNum <<= 4;
    if (CurOp != NumOps) {
      unsigned Val = MI.getOperand(CurOp++).getImm();
      assert(Val < 16 && "Immediate operand value out of range");
      I8RegNum |= Val;
    }
    emitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
                  StartByte, CB, Fixups);
  } else {
    // If there is a remaining operand, it must be a trailing immediate. Emit it
    // according to the right size for the instruction. Some instructions
    // (SSE4a extrq and insertq) have two trailing immediates.
    while (CurOp != NumOps && NumOps - CurOp <= 2) {
      emitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                    StartByte, CB, Fixups);
    }
  }

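  // 3DNow! instructions carry their selecting opcode byte after the operands,
  // e.g. pfadd mm0, mm1 encodes as 0F 0F C1 9E (9E being PFADD's opcode).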
  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    emitByte(X86II::getBaseOpcodeFor(TSFlags), CB);

  assert(CB.size() - StartByte <= 15 &&
         "The instruction must be no longer than 15 bytes.");
#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}

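// Factory hook for the MC layer; presumably registered from
// X86MCTargetDesc.cpp via TargetRegistry::RegisterMCCodeEmitter so MC
// clients can create the emitter by target triple.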
MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, Ctx);
}