//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

namespace {

class X86MCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII;
  MCContext &Ctx;

public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : MCII(mcii), Ctx(ctx) {}
  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
  X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
  ~X86MCCodeEmitter() override = default;

  bool is64BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode64Bit];
  }

  bool is32BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode32Bit];
  }

  bool is16BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode16Bit];
  }

  /// Is16BitMemOperand - Return true if the specified instruction has
  /// a 16-bit memory operand. Op specifies the operand # of the memory
  /// operand.
  bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
                         const MCSubtargetInfo &STI) const {
    const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
    const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
    const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);

    if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
        Disp.isImm() && Disp.getImm() < 0x10000)
      return true;
    if ((BaseReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
        (IndexReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
      return true;
    return false;
  }

  unsigned GetX86RegNum(const MCOperand &MO) const {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
  }

  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
    return Ctx.getRegisterInfo()->getEncodingValue(
        MI.getOperand(OpNum).getReg());
  }
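  // For illustration: getEncodingValue(X86::R9D) is 9, so GetX86RegNum
  // returns only the low three bits (1) used in the ModRM/SIB fields; the
  // fourth encoding bit travels separately in a REX/VEX/EVEX prefix bit.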
  // Does this register require a bit to be set in the REX prefix?
  bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const {
    return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
  }

  void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
    OS << (char)C;
    ++CurByte;
  }

  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                    raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != Size; ++i) {
      EmitByte(Val & 255, CurByte, OS);
      Val >>= 8;
    }
  }

  void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
                     unsigned ImmSize, MCFixupKind FixupKind,
                     unsigned &CurByte, raw_ostream &OS,
                     SmallVectorImpl<MCFixup> &Fixups,
                     int ImmOffset = 0) const;

  static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
    return RM | (RegOpcode << 3) | (Mod << 6);
  }

  void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        unsigned &CurByte, raw_ostream &OS) const {
    EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
  }

  void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   unsigned &CurByte, raw_ostream &OS) const {
    // The SIB byte has the same format as the ModRM byte.
    EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
  }

  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
                        uint64_t TSFlags, bool Rex, unsigned &CurByte,
                        raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                           const MCInst &MI, const MCInstrDesc &Desc,
                           raw_ostream &OS) const;

  void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
                                 const MCInst &MI, raw_ostream &OS) const;

  bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                        const MCInst &MI, const MCInstrDesc &Desc,
                        const MCSubtargetInfo &STI, raw_ostream &OS) const;

  uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                             int MemOperand, const MCInstrDesc &Desc) const;

  bool isPCRel32Branch(const MCInst &MI) const;
};

} // end anonymous namespace

/// isDisp8 - Return true if this signed displacement fits in an 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  return Value == (int8_t)Value;
}
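// For illustration: "addl %eax, (%ecx)" (opcode 01 /r) uses
// ModRMByte(0, 0, 1) == 0x01 (Mod=00 memory form, Reg=EAX, RM=ECX), while
// the register-direct "addl %eax, %ecx" uses ModRMByte(3, 0, 1) == 0xC1.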
/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
/// compressed displacement field.
static bool isCDisp8(uint64_t TSFlags, int Value, int &CValue) {
  assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
         "Compressed 8-bit displacement is only valid for EVEX inst.");

  unsigned CD8_Scale =
      (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
  if (CD8_Scale == 0) {
    CValue = Value;
    return isDisp8(Value);
  }

  unsigned Mask = CD8_Scale - 1;
  assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.");
  if (Value & Mask) // Unaligned offset
    return false;
  Value /= (int)CD8_Scale;
  bool Ret = (Value == (int8_t)Value);

  if (Ret)
    CValue = Value;
  return Ret;
}
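// For illustration: for an EVEX instruction whose memory tuple gives
// CD8_Scale == 64, a displacement of 128 is aligned and compresses to
// CValue == 2, still fitting in a disp8; a displacement of 100 is unaligned
// (100 & 63 != 0), so isCDisp8 fails and a full disp32 is used instead.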
/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default: llvm_unreachable("Unsupported signed fixup size!");
    case 4: return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}

/// Is32BitMemOperand - Return true if the specified instruction has
/// a 32-bit memory operand. Op specifies the operand # of the memory operand.
static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
    return true;
  if (BaseReg.getReg() == X86::EIP) {
    assert(IndexReg.getReg() == 0 && "Invalid eip-based address.");
    return true;
  }
  if (IndexReg.getReg() == X86::EIZ)
    return true;
  return false;
}

/// Is64BitMemOperand - Return true if the specified instruction has
/// a 64-bit memory operand. Op specifies the operand # of the memory operand.
#ifndef NDEBUG
static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}
#endif

/// StartsWithGlobalOffsetTable - Check if this expression starts with
/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We only check the simple cases
/// that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
/// of a binary expression.
enum GlobalOffsetTableExprKind {
  GOT_None,
  GOT_Normal,
  GOT_SymDiff
};
static GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = nullptr;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}

static bool HasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}

bool X86MCCodeEmitter::isPCRel32Branch(const MCInst &MI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4) ||
      getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
    return false;

  unsigned CurOp = X86II::getOperandBias(Desc);
  const MCOperand &Op = MI.getOperand(CurOp);
  if (!Op.isExpr())
    return false;

  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
  return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
}

void X86MCCodeEmitter::
EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
              MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
              SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 &&
        FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      EmitConstant(DispOp.getImm() + ImmOffset, Size, CurByte, OS);
      return;
    }
    Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // Adjust the fixup kind for references to _GLOBAL_OFFSET_TABLE_ and for
  // section-relative (@SECREL32) symbol references.
  if ((FixupKind == FK_Data_4 ||
       FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }

      if (Kind == GOT_Normal)
        ImmOffset = CurByte;
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (HasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
      if (HasSecRelSymbolRef(Bin->getLHS()) ||
          HasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }
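  // For illustration (i386 PIC): in "addl $_GLOBAL_OFFSET_TABLE_, %ebx" the
  // GOTPC-style relocation is resolved relative to the fixup location, so
  // ImmOffset = CurByte above folds the immediate's distance from the start
  // of the instruction into the addend, which appears to match the GAS
  // convention for this magic symbol.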
  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
      FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
    ImmOffset -= 4;
    // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
    // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
    // this needs to be a GOTPC32 relocation.
    if (StartsWithGlobalOffsetTable(Expr) != GOT_None)
      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
  }
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and 4 zeros.
  Fixups.push_back(MCFixup::create(CurByte, Expr, FixupKind, Loc));
  EmitConstant(0, Size, CurByte, OS);
}

void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
                                        unsigned RegOpcodeField,
                                        uint64_t TSFlags, bool Rex,
                                        unsigned &CurByte, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP ||
      BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
    assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
    EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);

    unsigned Opcode = MI.getOpcode();
    // movq loads are handled with a special relocation form which allows the
    // linker to eliminate some loads for GOT references which end up in the
    // same linkage unit.
    unsigned FixupKind = [=]() {
      switch (Opcode) {
      default:
        return X86::reloc_riprel_4byte;
      case X86::MOV64rm:
        assert(Rex);
        return X86::reloc_riprel_4byte_movq_load;
      case X86::CALL64m:
      case X86::JMP64m:
      case X86::TAILJMPm64:
      case X86::TEST64mr:
      case X86::ADC64rm:
      case X86::ADD64rm:
      case X86::AND64rm:
      case X86::CMP64rm:
      case X86::OR64rm:
      case X86::SBB64rm:
      case X86::SUB64rm:
      case X86::XOR64rm:
        return Rex ? X86::reloc_riprel_4byte_relax_rex
                   : X86::reloc_riprel_4byte_relax;
      }
    }();

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the displacement field of the instruction
    // with the size of the immediate field. If we have this case, add it into
    // the expression to emit.
    // Note: rip-relative addressing using immediate displacement values should
    // not be adjusted, assuming it was the user's intent.
    int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
                      ? X86II::getSizeOfImm(TSFlags)
                      : 0;

    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
                  CurByte, OS, Fixups, -ImmSize);
    return;
  }
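  // For illustration: in "movl $1, foo(%rip)" (C7 05 <disp32> <imm32>) the
  // displacement is measured from the end of the instruction, which lies
  // four bytes (the imm32) past the end of the displacement field, so the
  // fixup above is biased by -ImmSize on top of the usual pc-rel adjustment.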
  unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (Is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And GetX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo to the row values from
      // Table 2-1 for 16-bit addressing modes, where zero means disallowed.
      static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isDisp8(Disp.getImm())) {
        if (Disp.getImm() == 0 && RMfield != 6) {
          // There is no displacement; just the register.
          EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP], which cannot be
        // encoded without a displacement.
        EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
    } else {
      // There is no BaseReg; this is the plain [disp16] case.
      EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
    return;
  }
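  // For illustration: in 16-bit mode "[bx+si]" encodes as Mod=00/RM=0 and
  // "[bp+di+8]" as Mod=01/RM=3 with disp8=8; plain "[bp]" has no Mod=00
  // form (RM=6 there means [disp16]), so it becomes Mod=01/RM=6 with a
  // zero disp8.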
  // Determine whether a SIB byte is needed.
  // If no BaseReg, issue a RIP relative instruction only if the MCE can
  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
  // 2-7) and absolute references.

  if (// The SIB byte must be used if there is an index register.
      IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // present.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative
      // form).
      (!is64BitMode(STI) || BaseReg != 0)) {

    if (BaseReg == 0) { // [disp32] in X86-32 mode
      EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
      EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
      return;
    }

    // If the base is not EBP/ESP and there is no displacement, use simple
    // indirect register encoding; this handles addresses like [EAX]. The
    // encoding for [EBP] with no displacement means [disp32], so we handle it
    // by emitting a displacement of 0 below.
    if (BaseRegNo != N86::EBP) {
      if (Disp.isImm() && Disp.getImm() == 0) {
        EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
        return;
      }

      // If the displacement is @tlscall, treat it as a zero.
      if (Disp.isExpr()) {
        auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
        if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
          // This is exclusively used by call *a@tlscall(base). The relocation
          // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
          Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
          EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
          return;
        }
      }
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
    if (Disp.isImm()) {
      if (!HasEVEX && isDisp8(Disp.getImm())) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // Try the EVEX compressed 8-bit displacement first; if it fails, fall
      // back to a 32-bit displacement.
      int CDisp8 = 0;
      if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                      CDisp8 - Disp.getImm());
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32].
    EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
                                                : X86::reloc_signed_4byte;
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
                  Fixups);
    return;
  }
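  // For illustration: "movl (%eax), %eax" is simply 8B 00 (Mod=00, RM=EAX),
  // but "movl (%ebp), %eax" must be 8B 45 00 (Mod=01, disp8=0) because
  // Mod=00/RM=5 means [disp32], and "movl (%esp), %eax" is 8B 04 24 because
  // RM=4 signals a SIB byte.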
  // We need a SIB byte, so start by outputting the ModR/M byte first.
  assert(IndexReg.getReg() != X86::ESP &&
         IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8 = false;
  int CDisp8 = 0;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (!Disp.isImm()) {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (Disp.getImm() == 0 &&
             // The base reg can't be anything that encodes to '5' in the base
             // field; that is the magic [*] nomenclature indicating no base.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte.
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
  } else if (!HasEVEX && isDisp8(Disp.getImm())) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
  } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
    ImmOffset = CDisp8 - Disp.getImm();
  } else {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
  unsigned SS = SSTable[Scale.getImm()];

  if (BaseReg == 0) {
    // Handle the SIB byte for the case where there is no base, see Intel
    // Manual 2A, table 2-7. The displacement is emitted below.
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
      IndexRegNo = 4;
    EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
  } else {
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else
      IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
    EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
  }

  // Do we need to output a displacement?
  if (ForceDisp8)
    EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                  ImmOffset);
  else if (ForceDisp32 || Disp.getImm() != 0)
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
}
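// For illustration: "movl 12(%esi,%ebx,4), %eax" encodes as 8B 44 9E 0C:
// ModRM 0x44 (Mod=01, RM=100 selecting a SIB byte), SIB 0x9E (SS=10 for
// scale 4, Index=EBX, Base=ESI), followed by the disp8 0x0C.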
/// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
/// called VEX.
void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                           int MemOperand, const MCInst &MI,
                                           const MCInstrDesc &Desc,
                                           raw_ostream &OS) const {
  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");

  uint64_t Encoding = TSFlags & X86II::EncodingMask;
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64 bit mode only)
  //
  uint8_t VEX_R = 0x1;
  uint8_t EVEX_R2 = 0x1;

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)
  uint8_t VEX_X = 0x1;

  // VEX_B:
  //
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)
  //
  uint8_t VEX_B = 0x1;

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)
  uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100-0b11111: Reserved for future use
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword
  uint8_t VEX_5M;
  switch (TSFlags & X86II::OpMapMask) {
  default: llvm_unreachable("Invalid prefix!");
  case X86II::TB:   VEX_5M = 0x1; break; // 0F
  case X86II::T8:   VEX_5M = 0x2; break; // 0F 38
  case X86II::TA:   VEX_5M = 0x3; break; // 0F 3A
  case X86II::XOP8: VEX_5M = 0x8; break;
  case X86II::XOP9: VEX_5M = 0x9; break;
  case X86II::XOPA: VEX_5M = 0xA; break;
  }

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.
  uint8_t VEX_4V = 0xf;
  uint8_t EVEX_V2 = 0x1;

  // EVEX_L2/VEX_L (Vector Length):
  //
  //  L2 L
  //  0 0: scalar or 128-bit vector
  //  0 1: 256-bit vector
  //  1 0: 512-bit vector
  //
  uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
  uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2
  //
  uint8_t VEX_PP = 0;
  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: VEX_PP = 0x1; break; // 66
  case X86II::XS: VEX_PP = 0x2; break; // F3
  case X86II::XD: VEX_PP = 0x3; break; // F2
  }

  // EVEX_U
  uint8_t EVEX_U = 1; // Always '1' so far

  // EVEX_z
  uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;

  // EVEX_b
  uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;

  // EVEX_rc
  uint8_t EVEX_rc = 0;

  // EVEX_aaa
  uint8_t EVEX_aaa = 0;

  bool EncodeRC = false;

  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);
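  // For illustration: because these fields are inverted, a source register
  // like %xmm9 (encoding 9) is stored in vvvv as ~9 & 0xF == 0b0110, and an
  // unused vvvv stays 0b1111; the decoder re-inverts the bits.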
  switch (TSFlags & X86II::FormMask) {
  default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
  case X86II::RawFrm:
    break;
  case X86II::MRMDestMem: {
    // MRMDestMem instruction forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMSrcMem: {
    // MRMSrcMem instruction forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(Imm[7:4])
    //
    // FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;

    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    // Instruction format for 4VOp3:
    //  src1(ModR/M), MemAddr, src3(VEX_4V)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;

    VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
    break;
  }
  case X86II::MRMSrcMemOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    break;
  }
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {
    // MRM[0-7]m instruction forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;

    break;
  }
  case X86II::MRMSrcReg: {
    // MRMSrcReg instruction forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    // FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;

    if (EVEX_b) {
      if (HasEVEX_RC) {
        unsigned RcOperand = NumOps - 1;
        assert(RcOperand >= CurOp);
        EVEX_rc = MI.getOperand(RcOperand).getImm();
        assert(EVEX_rc <= 3 && "Invalid rounding control!");
      }
      EncodeRC = true;
    }
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    // Instruction format for 4VOp3:
    //  src1(ModR/M), src2(ModR/M), src3(VEX_4V)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;

    VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
    break;
  }
  case X86II::MRMSrcRegOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;

    // Skip second register source (encoded in Imm[7:4])
    ++CurOp;

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMDestReg: {
    // MRMDestReg instruction forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    if (EVEX_b)
      EncodeRC = true;
    break;
  }
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r: {
    // MRM0r-MRM7r instruction forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }
    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;
    break;
  }
  }
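  // For illustration: "vaddps %xmm0, %xmm1, %xmm2" qualifies for the 2-byte
  // form below and encodes as C5 F0 58 D0 (R=1, vvvv=~1=0b1110 for %xmm1,
  // L=0, pp=00); forcing the 3-byte form would instead give C4 E1 70 58 D0.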
  if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
    // The VEX opcode prefix can have 2 or 3 bytes
    //
    //  3 bytes:
    //    +-----+ +--------------+ +-------------------+
    //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
    //  2 bytes:
    //    +-----+ +-------------------+
    //    | C5h | | R | vvvv | L | pp |
    //    +-----+ +-------------------+
    //
    //  XOP uses a similar prefix:
    //    +-----+ +--------------+ +-------------------+
    //    | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
    uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);

    // Can we use the 2 byte VEX prefix?
    if (!(MI.getFlags() & X86::IP_USE_VEX3) &&
        Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
      EmitByte(0xC5, CurByte, OS);
      EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
      return;
    }

    // 3 byte VEX prefix
    EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
    EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
    EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
  } else {
    assert(Encoding == X86II::EVEX && "unknown encoding!");
    // The EVEX opcode prefix has 4 bytes
    //
    // +-----+ +--------------+ +-------------------+ +------------------------+
    // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
    // +-----+ +--------------+ +-------------------+ +------------------------+
    assert((VEX_5M & 0x3) == VEX_5M &&
           "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");

    EmitByte(0x62, CurByte, OS);
    EmitByte((VEX_R   << 7) |
             (VEX_X   << 6) |
             (VEX_B   << 5) |
             (EVEX_R2 << 4) |
             VEX_5M, CurByte, OS);
    EmitByte((VEX_W  << 7) |
             (VEX_4V << 3) |
             (EVEX_U << 2) |
             VEX_PP, CurByte, OS);
    if (EncodeRC)
      EmitByte((EVEX_z  << 7) |
               (EVEX_rc << 5) |
               (EVEX_b  << 4) |
               (EVEX_V2 << 3) |
               EVEX_aaa, CurByte, OS);
    else
      EmitByte((EVEX_z  << 7) |
               (EVEX_L2 << 6) |
               (VEX_L   << 5) |
               (EVEX_b  << 4) |
               (EVEX_V2 << 3) |
               EVEX_aaa, CurByte, OS);
  }
}
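// For illustration: the 512-bit "vaddps %zmm0, %zmm1, %zmm2" emits the
// 4-byte EVEX prefix 62 F1 74 48 (RXBR'=1111 with mm=01; W=0, vvvv=~1,
// U=1, pp=00; z=0, L'L=10, b=0, v'=1, aaa=000) followed by 58 D0.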
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with an
/// X86-64 REX prefix which specifies 1) 64-bit instructions, 2) non-default
/// operand size, and 3) use of X86-64 extended registers.
uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                                             int MemOperand,
                                             const MCInstrDesc &Desc) const {
  uint8_t REX = 0;
  bool UsesHighByteReg = false;

  if (TSFlags & X86II::REX_W)
    REX |= 1 << 3; // set REX.W

  if (MI.getNumOperands() == 0) return REX;

  unsigned NumOps = MI.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
  for (unsigned i = CurOp; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
      UsesHighByteReg = true;
    if (X86II::isX86_64NonExtLowByteReg(Reg))
      // FIXME: The caller of DetermineREXPrefix slaps this prefix onto
      // anything that returns non-zero.
      REX |= 0x40; // REX fixed encoding prefix
  }

  switch (TSFlags & X86II::FormMask) {
  case X86II::AddRegFrm:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  case X86II::MRMSrcReg:
  case X86II::MRMSrcRegCC:
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  case X86II::MRMSrcMem:
  case X86II::MRMSrcMemCC:
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0;  // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    CurOp += X86::AddrNumOperands;
    break;
  case X86II::MRMDestReg:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    break;
  case X86II::MRMDestMem:
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0;  // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    CurOp += X86::AddrNumOperands;
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    break;
  case X86II::MRMXmCC: case X86II::MRMXm:
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0;  // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    break;
  case X86II::MRMXrCC: case X86II::MRMXr:
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  }
  if (REX && UsesHighByteReg)
    report_fatal_error(
        "Cannot encode high byte register in REX-prefixed instruction");

  return REX;
}

/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed.
void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
                                                 unsigned SegOperand,
                                                 const MCInst &MI,
                                                 raw_ostream &OS) const {
  // Check for explicit segment override on memory operand.
  switch (MI.getOperand(SegOperand).getReg()) {
  default: llvm_unreachable("Unknown segment register!");
  case 0: break;
  case X86::CS: EmitByte(0x2E, CurByte, OS); break;
  case X86::SS: EmitByte(0x36, CurByte, OS); break;
  case X86::DS: EmitByte(0x3E, CurByte, OS); break;
  case X86::ES: EmitByte(0x26, CurByte, OS); break;
  case X86::FS: EmitByte(0x64, CurByte, OS); break;
  case X86::GS: EmitByte(0x65, CurByte, OS); break;
  }
}
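// For illustration: "addq %r8, (%r9)" (MRMDestMem) needs REX.W for the
// 64-bit operation plus REX.R for the %r8 source and REX.B for the %r9
// base, so the whole instruction encodes as 4D 01 01.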
/// Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present.
/// If not present, it is -1.
///
/// Returns true if a REX prefix was used.
bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                        int MemOperand, const MCInst &MI,
                                        const MCInstrDesc &Desc,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &OS) const {
  bool Ret = false;
  // Emit the operand size opcode prefix as needed.
  if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
                                                         : X86II::OpSize16))
    EmitByte(0x66, CurByte, OS);

  // Emit the LOCK opcode prefix.
  if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
    EmitByte(0xF0, CurByte, OS);

  // Emit the NOTRACK opcode prefix.
  if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
    EmitByte(0x3E, CurByte, OS);

  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: // 66
    EmitByte(0x66, CurByte, OS);
    break;
  case X86II::XS: // F3
    EmitByte(0xF3, CurByte, OS);
    break;
  case X86II::XD: // F2
    EmitByte(0xF2, CurByte, OS);
    break;
  }

  // Handle the REX prefix.
  // FIXME: Can this come before F2 etc to simplify emission?
  if (is64BitMode(STI)) {
    if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
      EmitByte(0x40 | REX, CurByte, OS);
      Ret = true;
    }
  } else {
    assert(!(TSFlags & X86II::REX_W) && "REX.W requires 64bit mode.");
  }

  // The 0x0F escape code must be emitted just before the opcode.
  switch (TSFlags & X86II::OpMapMask) {
  case X86II::TB:        // Two-byte opcode map
  case X86II::T8:        // 0F 38
  case X86II::TA:        // 0F 3A
  case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
    EmitByte(0x0F, CurByte, OS);
    break;
  }

  switch (TSFlags & X86II::OpMapMask) {
  case X86II::T8: // 0F 38
    EmitByte(0x38, CurByte, OS);
    break;
  case X86II::TA: // 0F 3A
    EmitByte(0x3A, CurByte, OS);
    break;
  }
  return Ret;
}
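// For illustration: "addw $1, (%rdi)" in 64-bit mode has a non-default
// 16-bit operand size, so emitOpcodePrefix emits 0x66 and the instruction
// becomes 66 83 07 01; a LOCK variant would add F0 as well (legacy prefix
// order is not architecturally significant).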
void X86MCCodeEmitter::
encodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;
  unsigned Flags = MI.getFlags();

  // Pseudo instructions don't get encoded.
  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
    return;

  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  // Keep track of the current byte being emitted.
  unsigned CurByte = 0;

  // Encoding type for this instruction.
  uint64_t Encoding = TSFlags & X86II::EncodingMask;

  // Does it use the VEX.VVVV field?
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;

  // Does it use the EVEX.aaa field?
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // Used if a register is encoded in 7:4 of immediate.
  unsigned I8RegNum = 0;

  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  if (MemoryOperand != -1) MemoryOperand += CurOp;

  // Emit segment override opcode prefix as needed.
  if (MemoryOperand >= 0)
    EmitSegmentOverridePrefix(CurByte, MemoryOperand + X86::AddrSegmentReg,
                              MI, OS);

  // Emit the repeat opcode prefix as needed.
  if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
    EmitByte(0xF3, CurByte, OS);
  if (Flags & X86::IP_HAS_REPEAT_NE)
    EmitByte(0xF2, CurByte, OS);

  // Emit the address size opcode prefix as needed.
  bool need_address_override;
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
      (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
      (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
    need_address_override = true;
  } else if (MemoryOperand < 0) {
    need_address_override = false;
  } else if (is64BitMode(STI)) {
    assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
    need_address_override = Is32BitMemOperand(MI, MemoryOperand);
  } else if (is32BitMode(STI)) {
    assert(!Is64BitMemOperand(MI, MemoryOperand));
    need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
  } else {
    assert(is16BitMode(STI));
    assert(!Is64BitMemOperand(MI, MemoryOperand));
    need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
  }

  if (need_address_override)
    EmitByte(0x67, CurByte, OS);
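  // For illustration: "movl (%esi), %eax" assembled in 64-bit mode uses
  // 32-bit address registers, so the 0x67 prefix is emitted here and the
  // instruction bytes are 67 8B 06.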
  bool Rex = false;
  if (Encoding == 0)
    Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
  else
    EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);

  uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    BaseOpcode = 0x0F; // Weird 3DNow! encoding.

  unsigned OpcodeOffset = 0;

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default: errs() << "FORM: " << Form << "\n";
    llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
  case X86II::RawFrmDstSrc: {
    unsigned siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(2).getReg() != X86::DS)
      EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::ESI) ||
        (is32BitMode(STI) && siReg == X86::SI))
      EmitByte(0x67, CurByte, OS);
    CurOp += 3; // Consume operands.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::RawFrmSrc: {
    unsigned siReg = MI.getOperand(0).getReg();
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(1).getReg() != X86::DS)
      EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::ESI) ||
        (is32BitMode(STI) && siReg == X86::SI))
      EmitByte(0x67, CurByte, OS);
    CurOp += 2; // Consume operands.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::RawFrmDst: {
    unsigned siReg = MI.getOperand(0).getReg();
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::EDI) ||
        (is32BitMode(STI) && siReg == X86::DI))
      EmitByte(0x67, CurByte, OS);
    ++CurOp; // Consume operand.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::AddCCFrm: {
    // This will be added to the opcode in the fallthrough.
    OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
    assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
    --NumOps; // Drop the operand from the end.
    LLVM_FALLTHROUGH;
  case X86II::RawFrm:
    EmitByte(BaseOpcode + OpcodeOffset, CurByte, OS);

    if (!is64BitMode(STI) || !isPCRel32Branch(MI))
      break;

    const MCOperand &Op = MI.getOperand(CurOp++);
    EmitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
                  MCFixupKind(X86::reloc_branch_4byte_pcrel), CurByte, OS,
                  Fixups);
    break;
  }
  case X86II::RawFrmMemOffs:
    // Emit segment override opcode prefix as needed.
    EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    ++CurOp; // skip segment operand
    break;
  case X86II::RawFrmImm8:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
                  OS, Fixups);
    break;
  case X86II::RawFrmImm16:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
                  OS, Fixups);
    break;

  case X86II::AddRegFrm:
    EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
    break;

  case X86II::MRMDestReg: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(CurOp),
                     GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMDestMem: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
                     Rex, CurByte, OS, Fixups, STI);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcReg: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    // do not count the rounding control operand
    if (HasEVEX_RC)
      --NumOps;
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    ++CurOp; // Encoded in VEX.VVVV
    break;
  }
  case X86II::MRMSrcRegOp4: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    // Skip 1st src (which is encoded in VEX_VVVV)
    ++SrcRegNum;

    // Capture 2nd src (which is encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, SrcRegNum++);

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcRegCC: {
    unsigned FirstOp = CurOp++;
    unsigned SecondOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);

    EmitRegModRMByte(MI.getOperand(SecondOp),
                     GetX86RegNum(MI.getOperand(FirstOp)), CurByte, OS);
    break;
  }
  case X86II::MRMSrcMem: {
    unsigned FirstMemOp = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++FirstMemOp;

    if (HasVEX_4V)
      ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    EmitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    unsigned FirstMemOp = CurOp + 1;

    EmitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    ++CurOp; // Encoded in VEX.VVVV.
    break;
  }
  case X86II::MRMSrcMemOp4: {
    unsigned FirstMemOp = CurOp + 1;

    ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    // Capture second register source (encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcMemOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, FirstMemOp++);

    EmitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    break;
  }
  case X86II::MRMSrcMemCC: {
    unsigned RegOp = CurOp++;
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(RegOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    break;
  }

  case X86II::MRMXrCC: {
    unsigned RegOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);
    EmitRegModRMByte(MI.getOperand(RegOp), 0, CurByte, OS);
    break;
  }
  case X86II::MRMXr:
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
    EmitRegModRMByte(MI.getOperand(CurOp++),
                     (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r,
                     CurByte, OS);
    break;

  case X86II::MRMXmCC: {
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Rex, CurByte, OS, Fixups, STI);
    break;
  }

  case X86II::MRMXm:
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
    emitMemModRMByte(MI, CurOp,
                     (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
                     Rex, CurByte, OS, Fixups, STI);
    CurOp += X86::AddrNumOperands;
    break;

  case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
  case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C5:
  case X86II::MRM_C6: case X86II::MRM_C7: case X86II::MRM_C8:
  case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
  case X86II::MRM_CC: case X86II::MRM_CD: case X86II::MRM_CE:
  case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
  case X86II::MRM_D2: case X86II::MRM_D3: case X86II::MRM_D4:
  case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D7:
  case X86II::MRM_D8: case X86II::MRM_D9: case X86II::MRM_DA:
  case X86II::MRM_DB: case X86II::MRM_DC: case X86II::MRM_DD:
  case X86II::MRM_DE: case X86II::MRM_DF: case X86II::MRM_E0:
  case X86II::MRM_E1: case X86II::MRM_E2: case X86II::MRM_E3:
  case X86II::MRM_E4: case X86II::MRM_E5: case X86II::MRM_E6:
  case X86II::MRM_E7: case X86II::MRM_E8: case X86II::MRM_E9:
  case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
  case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_EF:
  case X86II::MRM_F0: case X86II::MRM_F1: case X86II::MRM_F2:
  case X86II::MRM_F3: case X86II::MRM_F4: case X86II::MRM_F5:
  case X86II::MRM_F6: case X86II::MRM_F7: case X86II::MRM_F8:
  case X86II::MRM_F9: case X86II::MRM_FA: case X86II::MRM_FB:
  case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
  case X86II::MRM_FF:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
    break;
  }
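  // For illustration: for the FMA4/XOP Op4 forms, the register encoding
  // captured in I8RegNum earlier is now placed in bits [7:4] of a trailing
  // immediate byte; e.g. register encoding 2 with no low-nibble immediate
  // yields the byte 0x20.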
  if (HasVEX_I8Reg) {
    // The last source register of a 4 operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte.
    assert(I8RegNum < 16 && "Register encoding out of range");
    I8RegNum <<= 4;
    if (CurOp != NumOps) {
      unsigned Val = MI.getOperand(CurOp++).getImm();
      assert(Val < 16 && "Immediate operand value out of range");
      I8RegNum |= Val;
    }
    EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
                  CurByte, OS, Fixups);
  } else {
    // If there is a remaining operand, it must be a trailing immediate. Emit
    // it according to the right size for the instruction. Some instructions
    // (SSE4a extrq and insertq) have two trailing immediates.
    while (CurOp != NumOps && NumOps - CurOp <= 2) {
      EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                    CurByte, OS, Fixups);
    }
  }

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);

#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}

MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            const MCRegisterInfo &MRI,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, Ctx);
}