//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

namespace {

class X86MCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII;
  MCContext &Ctx;

public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : MCII(mcii), Ctx(ctx) {}
  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
  X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
  ~X86MCCodeEmitter() override = default;

  bool is64BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode64Bit];
  }

  bool is32BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode32Bit];
  }

  bool is16BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode16Bit];
  }

  /// Is16BitMemOperand - Return true if the specified instruction has
  /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
  bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
                         const MCSubtargetInfo &STI) const {
    const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
    const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
    const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);

    if (is16BitMode(STI) && BaseReg.getReg() == 0 && Disp.isImm() &&
        Disp.getImm() < 0x10000)
      return true;
    if ((BaseReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(
             BaseReg.getReg())) ||
        (IndexReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(
             IndexReg.getReg())))
      return true;
    return false;
  }

  unsigned GetX86RegNum(const MCOperand &MO) const {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
  }

  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
    return Ctx.getRegisterInfo()->getEncodingValue(
        MI.getOperand(OpNum).getReg());
  }
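  // For example, %r10's full encoding is 10 (0b1010): getX86RegEncoding
  // returns 10, GetX86RegNum masks it down to 2 (the ModRM/SIB field value),
  // and bit 3 is the extension bit carried by a REX/VEX/EVEX prefix.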
  // Does this register require a bit to be set in the REX prefix?
  bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const {
    return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
  }

  void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
    OS << (char)C;
    ++CurByte;
  }

  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                    raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != Size; ++i) {
      EmitByte(Val & 255, CurByte, OS);
      Val >>= 8;
    }
  }

  void EmitImmediate(const MCOperand &Disp, SMLoc Loc, unsigned ImmSize,
                     MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
                     SmallVectorImpl<MCFixup> &Fixups, int ImmOffset = 0) const;

  static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
    return RM | (RegOpcode << 3) | (Mod << 6);
  }

  void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        unsigned &CurByte, raw_ostream &OS) const {
    EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
  }

  void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   unsigned &CurByte, raw_ostream &OS) const {
    // SIB byte is in the same format as the ModRMByte.
    EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
  }

  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
                        uint64_t TSFlags, bool Rex, unsigned &CurByte,
                        raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                           const MCInst &MI, const MCInstrDesc &Desc,
                           raw_ostream &OS) const;

  void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
                                 const MCInst &MI, raw_ostream &OS) const;

  bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                        const MCInst &MI, const MCInstrDesc &Desc,
                        const MCSubtargetInfo &STI, raw_ostream &OS) const;

  uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                             int MemOperand, const MCInstrDesc &Desc) const;

  bool isPCRel32Branch(const MCInst &MI) const;
};

} // end anonymous namespace

/// isDisp8 - Return true if this signed displacement fits in an 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  return Value == (int8_t)Value;
}
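// For example, with a 64-byte (ZMM) memory operand the compression scale is
// 64: a displacement of 128 is stored as the compressed byte 2, while 130 is
// not a multiple of 64 and has to fall back to a full 32-bit displacement.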
/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
/// compressed displacement field.
static bool isCDisp8(uint64_t TSFlags, int Value, int &CValue) {
  assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
         "Compressed 8-bit displacement is only valid for EVEX inst.");

  unsigned CD8_Scale =
      (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
  if (CD8_Scale == 0) {
    CValue = Value;
    return isDisp8(Value);
  }

  unsigned Mask = CD8_Scale - 1;
  assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.");
  if (Value & Mask) // Unaligned offset
    return false;
  Value /= (int)CD8_Scale;
  bool Ret = (Value == (int8_t)Value);

  if (Ret)
    CValue = Value;
  return Ret;
}

/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default: llvm_unreachable("Unsupported signed fixup size!");
    case 4: return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}

/// Is32BitMemOperand - Return true if the specified instruction has
/// a 32-bit memory operand. Op specifies the operand # of the memoperand.
static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
    return true;
  if (BaseReg.getReg() == X86::EIP) {
    assert(IndexReg.getReg() == 0 && "Invalid eip-based address.");
    return true;
  }
  if (IndexReg.getReg() == X86::EIZ)
    return true;
  return false;
}

/// Is64BitMemOperand - Return true if the specified instruction has
/// a 64-bit memory operand. Op specifies the operand # of the memoperand.
#ifndef NDEBUG
static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}
#endif
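// For example, an i386 PIC prologue materializes the GOT base with
// 'addl $_GLOBAL_OFFSET_TABLE_, %ebx'; the symbol really means "distance
// from this fixup to the GOT" and is lowered to an R_386_GOTPC relocation.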
/// StartsWithGlobalOffsetTable - Check if this expression starts with
/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only the simple cases
/// that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the
/// start of a binary expression.
enum GlobalOffsetTableExprKind {
  GOT_None,
  GOT_Normal,
  GOT_SymDiff
};
static GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = nullptr;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}

static bool HasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}

bool X86MCCodeEmitter::isPCRel32Branch(const MCInst &MI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4) ||
      getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
    return false;

  unsigned CurOp = X86II::getOperandBias(Desc);
  const MCOperand &Op = MI.getOperand(CurOp);
  if (!Op.isExpr())
    return false;

  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
  return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
}

void X86MCCodeEmitter::
EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
              MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
              SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 && FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      EmitConstant(DispOp.getImm() + ImmOffset, Size, CurByte, OS);
      return;
    }
    Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // If we have an immoffset, add it to the expression.
  if ((FixupKind == FK_Data_4 || FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }

      if (Kind == GOT_Normal)
        ImmOffset = CurByte;
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (HasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
      if (HasSecRelSymbolRef(Bin->getLHS()) ||
          HasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
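  // For example, a 64-bit 'call foo' is E8 followed by a 4-byte pc-relative
  // field; the fixup value is computed from the start of that field, but the
  // CPU resolves the branch relative to the end of the instruction, hence
  // the -4 bias.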
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
      FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
    ImmOffset -= 4;
    // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
    // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
    // this needs to be a GOTPC32 relocation.
    if (StartsWithGlobalOffsetTable(Expr) != GOT_None)
      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
  }
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and 4 zeros.
  Fixups.push_back(MCFixup::create(CurByte, Expr, FixupKind, Loc));
  EmitConstant(0, Size, CurByte, OS);
}

void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
                                        unsigned RegOpcodeField,
                                        uint64_t TSFlags, bool Rex,
                                        unsigned &CurByte, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP ||
      BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
    assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
    EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);

    unsigned Opcode = MI.getOpcode();
    // movq loads are handled with a special relocation form which allows the
    // linker to eliminate some loads for GOT references which end up in the
    // same linkage unit.
    unsigned FixupKind = [=]() {
      switch (Opcode) {
      default:
        return X86::reloc_riprel_4byte;
      case X86::MOV64rm:
        assert(Rex);
        return X86::reloc_riprel_4byte_movq_load;
      case X86::CALL64m:
      case X86::JMP64m:
      case X86::TAILJMPm64:
      case X86::TEST64mr:
      case X86::ADC64rm:
      case X86::ADD64rm:
      case X86::AND64rm:
      case X86::CMP64rm:
      case X86::OR64rm:
      case X86::SBB64rm:
      case X86::SUB64rm:
      case X86::XOR64rm:
        return Rex ? X86::reloc_riprel_4byte_relax_rex
                   : X86::reloc_riprel_4byte_relax;
      }
    }();

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the displacement field of the instruction
    // with the size of the immediate field. If we have this case, add it into
    // the expression to emit.
    // Note: rip-relative addressing using immediate displacement values should
    // not be adjusted, assuming it was the user's intent.
    int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
                      ? X86II::getSizeOfImm(TSFlags)
                      : 0;

    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
                  Fixups, -ImmSize);
    return;
  }

  unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (Is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And GetX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo, to the row values from
      // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
      static const unsigned R16Table[] = {0, 0, 0, 7, 0, 6, 4, 5};
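      // For example, a base of BX (RegNo 3) maps to row 7; pairing it with
      // index SI (row 4) folds the combination down to R/M=0b000 below, the
      // [BX+SI] slot of Table 2-1.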
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isDisp8(Disp.getImm())) {
        if (Disp.getImm() == 0 && RMfield != 6) {
          // There is no displacement; just the register.
          EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be
        // encoded otherwise.
        EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
    } else {
      // There is no BaseReg; this is the plain [disp16] case.
      EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
    return;
  }
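  // From here on we are in 32/64-bit addressing. For example, [EAX] or
  // [EAX+disp] can be encoded with just a ModR/M byte, while [ESP] (whose
  // R/M value, 4, is the SIB escape) and any form with an index register,
  // such as [EAX+EBX*4], require a SIB byte.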
  // Determine whether a SIB byte is needed.
  // If no BaseReg, issue a RIP relative instruction only if the MCE can
  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
  // 2-7) and absolute references.

  if (// The SIB byte must be used if there is an index register.
      IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // present.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative
      // form).
      (!is64BitMode(STI) || BaseReg != 0)) {

    if (BaseReg == 0) { // [disp32] in X86-32 mode
      EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
      EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
      return;
    }

    // If the base is not EBP/ESP and there is no displacement, use simple
    // indirect register encoding, this handles addresses like [EAX]. The
    // encoding for [EBP] with no displacement means [disp32] so we handle it
    // by emitting a displacement of 0 below.
    if (BaseRegNo != N86::EBP) {
      if (Disp.isImm() && Disp.getImm() == 0) {
        EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
        return;
      }

      // If the displacement is @tlscall, treat it as a zero.
      if (Disp.isExpr()) {
        auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
        if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
          // This is exclusively used by call *a@tlscall(base). The relocation
          // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
          Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
          EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
          return;
        }
      }
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
    if (Disp.isImm()) {
      if (!HasEVEX && isDisp8(Disp.getImm())) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // Try EVEX compressed 8-bit displacement first; if it fails, fall back
      // to 32-bit displacement.
      int CDisp8 = 0;
      if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                      CDisp8 - Disp.getImm());
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
    EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
                                                : X86::reloc_signed_4byte;
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
                  Fixups);
    return;
  }

  // We need a SIB byte, so start by outputting the ModR/M byte first
  assert(IndexReg.getReg() != X86::ESP &&
         IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
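  // For example, [ESI+EDI*4+8] is emitted as ModRM Mod=01 R/M=100 (SIB
  // follows), SIB SS=10 Index=111 (EDI) Base=110 (ESI), then the disp8 0x08.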
  bool ForceDisp32 = false;
  bool ForceDisp8 = false;
  int CDisp8 = 0;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (!Disp.isImm()) {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (Disp.getImm() == 0 &&
             // Base reg can't be anything that ends up with '5' as the base
             // reg, it is the magic [*] nomenclature that indicates no base.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
  } else if (!HasEVEX && isDisp8(Disp.getImm())) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
  } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
    ImmOffset = CDisp8 - Disp.getImm();
  } else {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = {~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3};
  unsigned SS = SSTable[Scale.getImm()];

  if (BaseReg == 0) {
    // Handle the SIB byte for the case where there is no base, see Intel
    // Manual 2A, table 2-7. The displacement has already been output.
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
      IndexRegNo = 4;
    EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
  } else {
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else
      IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
    EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
  }

  // Do we need to output a displacement?
  if (ForceDisp8)
    EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                  ImmOffset);
  else if (ForceDisp32 || Disp.getImm() != 0)
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
}
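// As a concrete reference point for the prefix logic below:
// 'vaddps %ymm1, %ymm2, %ymm3' fits the 2-byte VEX form and assembles to
// C5 EC 58 D9 (vvvv = ~2 = 0b1101 for %ymm2, L=1, pp=00, ModRM=0xD9).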
/// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
/// called VEX.
void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                           int MemOperand, const MCInst &MI,
                                           const MCInstrDesc &Desc,
                                           raw_ostream &OS) const {
  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");

  uint64_t Encoding = TSFlags & X86II::EncodingMask;
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64 bit mode only)
  //
  uint8_t VEX_R = 0x1;
  uint8_t EVEX_R2 = 0x1;

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)
  uint8_t VEX_X = 0x1;

  // VEX_B:
  //
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)
  //
  uint8_t VEX_B = 0x1;

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)
  uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100-0b11111: Reserved for future use
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword
  uint8_t VEX_5M;
  switch (TSFlags & X86II::OpMapMask) {
  default: llvm_unreachable("Invalid prefix!");
  case X86II::TB:   VEX_5M = 0x1; break; // 0F
  case X86II::T8:   VEX_5M = 0x2; break; // 0F 38
  case X86II::TA:   VEX_5M = 0x3; break; // 0F 3A
  case X86II::XOP8: VEX_5M = 0x8; break;
  case X86II::XOP9: VEX_5M = 0x9; break;
  case X86II::XOPA: VEX_5M = 0xA; break;
  }

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.
  uint8_t VEX_4V = 0xf;
  uint8_t EVEX_V2 = 0x1;

  // EVEX_L2/VEX_L (Vector Length):
  //
  // L2 L
  //  0 0: scalar or 128-bit vector
  //  0 1: 256-bit vector
  //  1 0: 512-bit vector
  //
  uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
  uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2
  //
  uint8_t VEX_PP = 0;
  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: VEX_PP = 0x1; break; // 66
  case X86II::XS: VEX_PP = 0x2; break; // F3
  case X86II::XD: VEX_PP = 0x3; break; // F2
  }

  // EVEX_U
  uint8_t EVEX_U = 1; // Always '1' so far

  // EVEX_z
  uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;

  // EVEX_b
  uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;

  // EVEX_rc
  uint8_t EVEX_rc = 0;

  // EVEX_aaa
  uint8_t EVEX_aaa = 0;

  bool EncodeRC = false;

  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  switch (TSFlags & X86II::FormMask) {
  default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
  case X86II::RawFrm:
    break;
  case X86II::MRMDestMem: {
    // MRMDestMem instruction forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMSrcMem: {
    // MRMSrcMem instruction forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(Imm[7:4])
    //
    // FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB, which doesn't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;

    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    // Instruction format for 4VOp3:
    //  src1(ModR/M), MemAddr, src3(VEX_4V)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;

    VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
    break;
  }
  case X86II::MRMSrcMemOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    break;
  }
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {
    // MRM[0-9]m instruction forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc =
        getX86RegEncoding(MI, MemOperand + X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    break;
  }
  case X86II::MRMSrcReg: {
    // MRMSrcReg instruction forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    // FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;

    if (EVEX_b) {
      if (HasEVEX_RC) {
        unsigned RcOperand = NumOps - 1;
        assert(RcOperand >= CurOp);
        EVEX_rc = MI.getOperand(RcOperand).getImm();
        assert(EVEX_rc <= 3 && "Invalid rounding control!");
      }
      EncodeRC = true;
    }
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    // Instruction format for 4VOp3:
    //  src1(ModR/M), src2(ModR/M), src3(VEX_4V)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;

    VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
    break;
  }
  case X86II::MRMSrcRegOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;

    // Skip second register source (encoded in Imm[7:4])
    ++CurOp;

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMDestReg: {
    // MRMDestReg instruction forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    if (EVEX_b)
      EncodeRC = true;
    break;
  }
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r: {
    // MRM0r-MRM7r instruction forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }
    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;
    break;
  }
  }

  if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
    // VEX opcode prefix can have 2 or 3 bytes
    //
    //  3 bytes:
    //    +-----+ +--------------+ +-------------------+
    //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
    //  2 bytes:
    //    +-----+ +-------------------+
    //    | C5h | | R | vvvv | L | pp |
    //    +-----+ +-------------------+
    //
    //  XOP uses a similar prefix:
    //    +-----+ +--------------+ +-------------------+
    //    | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
    uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
    // Can we use the 2 byte VEX prefix?
    if (!(MI.getFlags() & X86::IP_USE_VEX3) &&
        Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
      EmitByte(0xC5, CurByte, OS);
      EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
      return;
    }

    // 3 byte VEX prefix
    EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
    EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
    EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
  } else {
    assert(Encoding == X86II::EVEX && "unknown encoding!");
    // EVEX opcode prefix can have 4 bytes
    //
    // +-----+ +--------------+ +-------------------+ +------------------------+
    // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
    // +-----+ +--------------+ +-------------------+ +------------------------+
    assert((VEX_5M & 0x3) == VEX_5M &&
           "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");

    EmitByte(0x62, CurByte, OS);
    EmitByte((VEX_R   << 7) |
             (VEX_X   << 6) |
             (VEX_B   << 5) |
             (EVEX_R2 << 4) |
             VEX_5M, CurByte, OS);
    EmitByte((VEX_W  << 7) |
             (VEX_4V << 3) |
             (EVEX_U << 2) |
             VEX_PP, CurByte, OS);
    if (EncodeRC)
      EmitByte((EVEX_z  << 7) |
               (EVEX_rc << 5) |
               (EVEX_b  << 4) |
               (EVEX_V2 << 3) |
               EVEX_aaa, CurByte, OS);
    else
      EmitByte((EVEX_z  << 7) |
               (EVEX_L2 << 6) |
               (VEX_L   << 5) |
               (EVEX_b  << 4) |
               (EVEX_V2 << 3) |
               EVEX_aaa, CurByte, OS);
  }
}
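// For example, 'vaddps %zmm1, %zmm2, %zmm3' assembles to 62 F1 6C 48 58 D9:
// payload byte 1 is RXBR'=1111/mm=01, byte 2 is W=0/vvvv=1101/U=1/pp=00, and
// byte 3 is z=0/L'L=10/b=0/V'=1/aaa=000 for an unmasked 512-bit operation.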
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with a
/// X86-64 REX prefix which specifies 1) 64-bit instructions, 2) non-default
/// operand size, and 3) use of X86-64 extended registers.
uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                                             int MemOperand,
                                             const MCInstrDesc &Desc) const {
  uint8_t REX = 0;
  bool UsesHighByteReg = false;

  if (TSFlags & X86II::REX_W)
    REX |= 1 << 3; // set REX.W

  if (MI.getNumOperands() == 0) return REX;

  unsigned NumOps = MI.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
  for (unsigned i = CurOp; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
      UsesHighByteReg = true;
    if (X86II::isX86_64NonExtLowByteReg(Reg))
      // FIXME: The caller of DetermineREXPrefix slaps this prefix onto
      // anything that returns non-zero.
      REX |= 0x40; // REX fixed encoding prefix
  }

  switch (TSFlags & X86II::FormMask) {
  case X86II::AddRegFrm:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  case X86II::MRMSrcReg:
  case X86II::MRMSrcRegCC:
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  case X86II::MRMSrcMem:
  case X86II::MRMSrcMemCC:
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    CurOp += X86::AddrNumOperands;
    break;
  case X86II::MRMDestReg:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    break;
  case X86II::MRMDestMem:
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    CurOp += X86::AddrNumOperands;
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    break;
  case X86II::MRMXmCC: case X86II::MRMXm:
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    break;
  case X86II::MRMXrCC: case X86II::MRMXr:
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  }
  if (REX && UsesHighByteReg)
    report_fatal_error(
        "Cannot encode high byte register in REX-prefixed instruction");

  return REX;
}

/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
                                                 unsigned SegOperand,
                                                 const MCInst &MI,
                                                 raw_ostream &OS) const {
  // Check for explicit segment override on memory operand.
  switch (MI.getOperand(SegOperand).getReg()) {
  default: llvm_unreachable("Unknown segment register!");
  case 0: break;
  case X86::CS: EmitByte(0x2E, CurByte, OS); break;
  case X86::SS: EmitByte(0x36, CurByte, OS); break;
  case X86::DS: EmitByte(0x3E, CurByte, OS); break;
  case X86::ES: EmitByte(0x26, CurByte, OS); break;
  case X86::FS: EmitByte(0x64, CurByte, OS); break;
  case X86::GS: EmitByte(0x65, CurByte, OS); break;
  }
}

/// Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present.
/// If not present, it is -1.
///
/// Returns true if a REX prefix was used.
bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                        int MemOperand, const MCInst &MI,
                                        const MCInstrDesc &Desc,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &OS) const {
  bool Ret = false;
  // Emit the operand size opcode prefix as needed.
  if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
                                                         : X86II::OpSize16))
    EmitByte(0x66, CurByte, OS);
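  // For example, a 16-bit 'addw' assembled in 32-bit mode needs the 0x66
  // override, and so does a 32-bit 'addl' assembled in 16-bit mode.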
  // Emit the LOCK opcode prefix.
  if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
    EmitByte(0xF0, CurByte, OS);

  // Emit the NOTRACK opcode prefix.
  if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
    EmitByte(0x3E, CurByte, OS);

  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: // 66
    EmitByte(0x66, CurByte, OS);
    break;
  case X86II::XS: // F3
    EmitByte(0xF3, CurByte, OS);
    break;
  case X86II::XD: // F2
    EmitByte(0xF2, CurByte, OS);
    break;
  }

  // Handle REX prefix.
  // FIXME: Can this come before F2 etc to simplify emission?
  if (is64BitMode(STI)) {
    if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
      EmitByte(0x40 | REX, CurByte, OS);
      Ret = true;
    }
  } else {
    assert(!(TSFlags & X86II::REX_W) && "REX.W requires 64bit mode.");
  }

  // 0x0F escape code must be emitted just before the opcode.
  switch (TSFlags & X86II::OpMapMask) {
  case X86II::TB:        // Two-byte opcode map
  case X86II::T8:        // 0F 38
  case X86II::TA:        // 0F 3A
  case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
    EmitByte(0x0F, CurByte, OS);
    break;
  }

  switch (TSFlags & X86II::OpMapMask) {
  case X86II::T8: // 0F 38
    EmitByte(0x38, CurByte, OS);
    break;
  case X86II::TA: // 0F 3A
    EmitByte(0x3A, CurByte, OS);
    break;
  }
  return Ret;
}

void X86MCCodeEmitter::
encodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;
  unsigned Flags = MI.getFlags();

  // Pseudo instructions don't get encoded.
  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
    return;

  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  // Keep track of the current byte being emitted.
  unsigned CurByte = 0;

  // Encoding type for this instruction.
  uint64_t Encoding = TSFlags & X86II::EncodingMask;

  // Does it use the VEX.VVVV field?
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;

  // Does it use the EVEX.aaa field?
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // Used if a register is encoded in 7:4 of immediate.
  unsigned I8RegNum = 0;

  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  if (MemoryOperand != -1) MemoryOperand += CurOp;

  // Emit segment override opcode prefix as needed.
  if (MemoryOperand >= 0)
    EmitSegmentOverridePrefix(CurByte, MemoryOperand + X86::AddrSegmentReg,
                              MI, OS);

  // Emit the repeat opcode prefix as needed.
  if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
    EmitByte(0xF3, CurByte, OS);
  if (Flags & X86::IP_HAS_REPEAT_NE)
    EmitByte(0xF2, CurByte, OS);

  // Emit the address size opcode prefix as needed.
  bool need_address_override;
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
      (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
      (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
    need_address_override = true;
  } else if (MemoryOperand < 0) {
    need_address_override = false;
  } else if (is64BitMode(STI)) {
    assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
    need_address_override = Is32BitMemOperand(MI, MemoryOperand);
  } else if (is32BitMode(STI)) {
    assert(!Is64BitMemOperand(MI, MemoryOperand));
    need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
  } else {
    assert(is16BitMode(STI));
    assert(!Is64BitMemOperand(MI, MemoryOperand));
    need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
  }

  if (need_address_override)
    EmitByte(0x67, CurByte, OS);
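  // For example, 'movl (%ebx), %eax' in 64-bit mode assembles to 67 8B 03,
  // while the %rbx-based form is just 8B 03.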
  bool Rex = false;
  if (Encoding == 0)
    Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
  else
    EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);

  uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    BaseOpcode = 0x0F; // Weird 3DNow! encoding.

  unsigned OpcodeOffset = 0;

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default: errs() << "FORM: " << Form << "\n";
    llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
  case X86II::RawFrmDstSrc: {
    unsigned siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(2).getReg() != X86::DS)
      EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::ESI) ||
        (is32BitMode(STI) && siReg == X86::SI))
      EmitByte(0x67, CurByte, OS);
    CurOp += 3; // Consume operands.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::RawFrmSrc: {
    unsigned siReg = MI.getOperand(0).getReg();
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(1).getReg() != X86::DS)
      EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::ESI) ||
        (is32BitMode(STI) && siReg == X86::SI))
      EmitByte(0x67, CurByte, OS);
    CurOp += 2; // Consume operands.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::RawFrmDst: {
    unsigned siReg = MI.getOperand(0).getReg();
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::EDI) ||
        (is32BitMode(STI) && siReg == X86::DI))
      EmitByte(0x67, CurByte, OS);
    ++CurOp; // Consume operand.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::AddCCFrm: {
    // This will be added to the opcode in the fallthrough.
    OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
    assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
    --NumOps; // Drop the operand from the end.
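    // For example, JCC_4 has base opcode 0x80 in the 0F map, so 'jne'
    // (CondCode 5) becomes 0F 85 followed by a 4-byte pc-relative immediate.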
    LLVM_FALLTHROUGH;
  case X86II::RawFrm:
    EmitByte(BaseOpcode + OpcodeOffset, CurByte, OS);

    if (!is64BitMode(STI) || !isPCRel32Branch(MI))
      break;

    const MCOperand &Op = MI.getOperand(CurOp++);
    EmitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
                  MCFixupKind(X86::reloc_branch_4byte_pcrel), CurByte, OS,
                  Fixups);
    break;
  }
  case X86II::RawFrmMemOffs:
    // Emit segment override opcode prefix as needed.
    EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    ++CurOp; // skip segment operand
    break;
  case X86II::RawFrmImm8:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
                  OS, Fixups);
    break;
  case X86II::RawFrmImm16:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
                  OS, Fixups);
    break;

  case X86II::AddRegFrm:
    EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
    break;

  case X86II::MRMDestReg: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(CurOp),
                     GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMDestMem: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
                     Rex, CurByte, OS, Fixups, STI);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcReg: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    // do not count the rounding control operand
    if (HasEVEX_RC)
      --NumOps;
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    ++CurOp; // Encoded in VEX.VVVV
    break;
  }
  case X86II::MRMSrcRegOp4: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    // Skip 1st src (which is encoded in VEX_VVVV)
    ++SrcRegNum;

    // Capture 2nd src (which is encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
"MRMSrcRegOp4 should imply VEX_I8Reg"); 1456 I8RegNum = getX86RegEncoding(MI, SrcRegNum++); 1457 1458 EmitRegModRMByte(MI.getOperand(SrcRegNum), 1459 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS); 1460 CurOp = SrcRegNum + 1; 1461 break; 1462 } 1463 case X86II::MRMSrcRegCC: { 1464 unsigned FirstOp = CurOp++; 1465 unsigned SecondOp = CurOp++; 1466 1467 unsigned CC = MI.getOperand(CurOp++).getImm(); 1468 EmitByte(BaseOpcode + CC, CurByte, OS); 1469 1470 EmitRegModRMByte(MI.getOperand(SecondOp), 1471 GetX86RegNum(MI.getOperand(FirstOp)), CurByte, OS); 1472 break; 1473 } 1474 case X86II::MRMSrcMem: { 1475 unsigned FirstMemOp = CurOp+1; 1476 1477 if (HasEVEX_K) // Skip writemask 1478 ++FirstMemOp; 1479 1480 if (HasVEX_4V) 1481 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV). 1482 1483 EmitByte(BaseOpcode, CurByte, OS); 1484 1485 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)), 1486 TSFlags, Rex, CurByte, OS, Fixups, STI); 1487 CurOp = FirstMemOp + X86::AddrNumOperands; 1488 if (HasVEX_I8Reg) 1489 I8RegNum = getX86RegEncoding(MI, CurOp++); 1490 break; 1491 } 1492 case X86II::MRMSrcMem4VOp3: { 1493 unsigned FirstMemOp = CurOp+1; 1494 1495 EmitByte(BaseOpcode, CurByte, OS); 1496 1497 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)), 1498 TSFlags, Rex, CurByte, OS, Fixups, STI); 1499 CurOp = FirstMemOp + X86::AddrNumOperands; 1500 ++CurOp; // Encoded in VEX.VVVV. 1501 break; 1502 } 1503 case X86II::MRMSrcMemOp4: { 1504 unsigned FirstMemOp = CurOp+1; 1505 1506 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV). 1507 1508 // Capture second register source (encoded in Imm[7:4]) 1509 assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg"); 1510 I8RegNum = getX86RegEncoding(MI, FirstMemOp++); 1511 1512 EmitByte(BaseOpcode, CurByte, OS); 1513 1514 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)), 1515 TSFlags, Rex, CurByte, OS, Fixups, STI); 1516 CurOp = FirstMemOp + X86::AddrNumOperands; 1517 break; 1518 } 1519 case X86II::MRMSrcMemCC: { 1520 unsigned RegOp = CurOp++; 1521 unsigned FirstMemOp = CurOp; 1522 CurOp = FirstMemOp + X86::AddrNumOperands; 1523 1524 unsigned CC = MI.getOperand(CurOp++).getImm(); 1525 EmitByte(BaseOpcode + CC, CurByte, OS); 1526 1527 emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(RegOp)), 1528 TSFlags, Rex, CurByte, OS, Fixups, STI); 1529 break; 1530 } 1531 1532 case X86II::MRMXrCC: { 1533 unsigned RegOp = CurOp++; 1534 1535 unsigned CC = MI.getOperand(CurOp++).getImm(); 1536 EmitByte(BaseOpcode + CC, CurByte, OS); 1537 EmitRegModRMByte(MI.getOperand(RegOp), 0, CurByte, OS); 1538 break; 1539 } 1540 1541 case X86II::MRMXr: 1542 case X86II::MRM0r: case X86II::MRM1r: 1543 case X86II::MRM2r: case X86II::MRM3r: 1544 case X86II::MRM4r: case X86II::MRM5r: 1545 case X86II::MRM6r: case X86II::MRM7r: 1546 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV). 1547 ++CurOp; 1548 if (HasEVEX_K) // Skip writemask 1549 ++CurOp; 1550 EmitByte(BaseOpcode, CurByte, OS); 1551 EmitRegModRMByte(MI.getOperand(CurOp++), 1552 (Form == X86II::MRMXr) ? 
  case X86II::MRMXmCC: {
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Rex, CurByte, OS, Fixups, STI);
    break;
  }

  case X86II::MRMXm:
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
    emitMemModRMByte(MI, CurOp,
                     (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
                     Rex, CurByte, OS, Fixups, STI);
    CurOp += X86::AddrNumOperands;
    break;

  case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
  case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C5:
  case X86II::MRM_C6: case X86II::MRM_C7: case X86II::MRM_C8:
  case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
  case X86II::MRM_CC: case X86II::MRM_CD: case X86II::MRM_CE:
  case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
  case X86II::MRM_D2: case X86II::MRM_D3: case X86II::MRM_D4:
  case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D7:
  case X86II::MRM_D8: case X86II::MRM_D9: case X86II::MRM_DA:
  case X86II::MRM_DB: case X86II::MRM_DC: case X86II::MRM_DD:
  case X86II::MRM_DE: case X86II::MRM_DF: case X86II::MRM_E0:
  case X86II::MRM_E1: case X86II::MRM_E2: case X86II::MRM_E3:
  case X86II::MRM_E4: case X86II::MRM_E5: case X86II::MRM_E6:
  case X86II::MRM_E7: case X86II::MRM_E8: case X86II::MRM_E9:
  case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
  case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_EF:
  case X86II::MRM_F0: case X86II::MRM_F1: case X86II::MRM_F2:
  case X86II::MRM_F3: case X86II::MRM_F4: case X86II::MRM_F5:
  case X86II::MRM_F6: case X86II::MRM_F7: case X86II::MRM_F8:
  case X86II::MRM_F9: case X86II::MRM_FA: case X86II::MRM_FB:
  case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
  case X86II::MRM_FF:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
    break;
  }

  if (HasVEX_I8Reg) {
    // The last source register of a 4 operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte.
    assert(I8RegNum < 16 && "Register encoding out of range");
    I8RegNum <<= 4;
    if (CurOp != NumOps) {
      unsigned Val = MI.getOperand(CurOp++).getImm();
      assert(Val < 16 && "Immediate operand value out of range");
      I8RegNum |= Val;
    }
    EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
                  CurByte, OS, Fixups);
  } else {
    // If there is a remaining operand, it must be a trailing immediate. Emit
    // it according to the right size for the instruction. Some instructions
    // (SSE4a extrq and insertq) have two trailing immediates.
    while (CurOp != NumOps && NumOps - CurOp <= 2) {
      EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                    CurByte, OS, Fixups);
    }
  }

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);

#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}

MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            const MCRegisterInfo &MRI,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, Ctx);
}