//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"

using namespace llvm;

// Pull in the TableGen-generated MC-level register, instruction, and
// subtarget descriptions.
#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"

/// Translate the triple's bitness/environment into the subtarget feature
/// string selecting 16-, 32-, or 64-bit mode. Never returns an empty string.
std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  // SSE2 should default to enabled in 64-bit mode, but can be turned off
  // explicitly.
  if (TT.isArch64Bit())
    FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";

  return FS;
}

/// Choose the DWARF register-numbering flavour for the triple. x86-64 has a
/// single flavour; 32-bit Darwin additionally distinguishes the EH numbering.
unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported by now, just quick fallback
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

/// True when the instruction carries the LOCK prefix flag.
bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}

/// Populate MRI's LLVM-reg -> SEH-reg and LLVM-reg -> CodeView-reg tables.
void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  // The SEH register number is simply the register's encoding value.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      // The x87 stack registers ST0-ST7 are mapped from both the physical
      // ST registers and the FP pseudo registers below.
      {codeview::RegisterId::ST0, X86::ST0},
      {codeview::RegisterId::ST1, X86::ST1},
      {codeview::RegisterId::ST2, X86::ST2},
      {codeview::RegisterId::ST3, X86::ST3},
      {codeview::RegisterId::ST4, X86::ST4},
      {codeview::RegisterId::ST5, X86::ST5},
      {codeview::RegisterId::ST6, X86::ST6},
      {codeview::RegisterId::ST7, X86::ST7},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      // x86-64-only registers.
      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},

  };
  // Register the CodeView mapping on the MC register info.
  for (const auto &I : RegMap)
    MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}

/// Create the X86 MCSubtargetInfo. The triple-derived mode features are
/// always prepended to the caller-supplied feature string FS, and the CPU
/// defaults to "generic" when unspecified.
MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  assert(!ArchFS.empty() && "Failed to parse X86 triple");
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  if (CPU.empty())
    CPU = "generic";

  return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
}

static
MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

/// Create the X86 MCRegisterInfo, selecting the return-address register and
/// DWARF flavours from the triple, and installing the SEH/CodeView mappings.
static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

/// Pick the MCAsmInfo variant implied by the triple's object container and
/// assembler dialect, then record the initial CFI frame state (CFA at
/// SP - stackGrowth, return address stored at the CFA offset).
static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple,
                                     const MCTargetOptions &Options) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    if (Options.getAssemblyLanguage().equals_insensitive("masm"))
      MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
    else
      MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

  // Initialize initial frame state.
  // Calculate amount of bytes used for return address storing
  int stackGrowth = is64Bit ? -8 : -4;

  // Initial state of the frame pointer is esp+stackGrowth.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add return address to move list
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}

/// Syntax variant 0 selects the AT&T printer, 1 the Intel printer; any other
/// variant is unsupported and yields nullptr.
static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

/// X86-specific MCInstrAnalysis: super-register clobber analysis, PLT entry
/// parsing, and PC-relative branch/memory-operand evaluation.
class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 uint64_t GotSectionVA,
                 const Triple &TargetTriple) const override;

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override;
  Optional<uint64_t> evaluateMemoryOperandAddress(const
MCInst &Inst,
                                                  const MCSubtargetInfo *STI,
                                                  uint64_t Addr,
                                                  uint64_t Size) const override;
  Optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst,
                                   uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

/// Set a bit in Mask for every explicit/implicit def of Inst that
/// architecturally zeroes the rest of its super-register; returns true if any
/// bit was set. Mask must be sized NumDefs + NumImplicitDefs.
bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.getNumImplicitDefs();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector register
    // width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  // Explicit defs occupy the first NumDefs operands.
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  // Implicit defs follow in the mask, after the explicit-def bits.
  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.getImplicitDefs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}

/// Scan a 32-bit .plt section for entries, returning (entry VA, target VA)
/// pairs. Recognizes both `jmp *imm(%ebx)` (ff a3, PIC: target is
/// .got.plt base + imm) and `jmp *imm` (ff 25, absolute).
static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                  uint64_t GotPltSectionVA) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, GotPltSectionVA + Imm));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

/// Scan a 64-bit .plt section for entries; targets are RIP-relative
/// (next-instruction address + imm).
static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    // Recognize a jmp.
if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

/// Dispatch PLT parsing by architecture; non-x86 triples yield no entries.
std::vector<std::pair<uint64_t, uint64_t>> X86MCInstrAnalysis::findPltEntries(
    uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
    uint64_t GotPltSectionVA, const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents, GotPltSectionVA);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

/// Compute the target of a PC-relative branch: end-of-instruction address
/// plus the first (PCRel immediate) operand.
bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
                                        uint64_t Size, uint64_t &Target) const {
  if (Inst.getNumOperands() == 0 ||
      Info->get(Inst.getOpcode()).OpInfo[0].OperandType != MCOI::OPERAND_PCREL)
    return false;
  Target = Addr + Size + Inst.getOperand(0).getImm();
  return true;
}

/// Evaluate the address of Inst's memory operand. Only the plain RIP-relative
/// form (no segment, no index, scale 1, immediate displacement) is handled;
/// anything else yields None.
Optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
    uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return None;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() != 0 || IndexReg.getReg() != 0 || ScaleAmt.getImm() != 1 ||
      !Disp.isImm())
    return None;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();

  return None;
}

/// For a RIP-relative LEA64r, return the offset of its 32-bit displacement
/// within the instruction encoding (it is the last 4 bytes); None otherwise.
Optional<uint64_t>
X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst,
                                                     uint64_t Size) const {
  if (Inst.getOpcode() != X86::LEA64r)
    return None;
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return None;
  MemOpStart += X86II::getOperandBias(MCID);
  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  // Must be a simple rip-relative address.
  if (BaseReg.getReg() != X86::RIP || SegReg.getReg() != 0 ||
      IndexReg.getReg() != 0 || ScaleAmt.getImm() != 1 || !Disp.isImm())
    return None;
  // rip-relative ModR/M immediate is 32 bits.
  assert(Size > 4 && "invalid instruction size for rip-relative lea");
  return Size - 4;
}

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend. The backends differ between the 32- and 64-bit
  // targets, so they are registered outside the loop.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}

/// Map Reg to its sub- or super-register of the given bit Size, or return
/// X86::NoRegister when no such register exists. For Size == 8, High selects
/// the high-byte register (AH/CH/DH/BH) where one exists.
MCRegister llvm::getX86SubSuperRegisterOrZero(MCRegister Reg, unsigned Size,
                                              bool High) {
  switch (Size) {
  default: return X86::NoRegister;
  case 8:
    if (High) {
      switch (Reg.id()) {
      // Registers with no high-byte alias fall back to the 64-bit
      // super-register lookup.
      default: return getX86SubSuperRegisterOrZero(Reg, 64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg.id()) {
      default: return X86::NoRegister;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case
X86::R8W: case X86::R8D: case X86::R8: 694 return X86::R8B; 695 case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9: 696 return X86::R9B; 697 case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10: 698 return X86::R10B; 699 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11: 700 return X86::R11B; 701 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12: 702 return X86::R12B; 703 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13: 704 return X86::R13B; 705 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14: 706 return X86::R14B; 707 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: 708 return X86::R15B; 709 } 710 } 711 case 16: 712 switch (Reg.id()) { 713 default: return X86::NoRegister; 714 case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: 715 return X86::AX; 716 case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: 717 return X86::DX; 718 case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX: 719 return X86::CX; 720 case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX: 721 return X86::BX; 722 case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI: 723 return X86::SI; 724 case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI: 725 return X86::DI; 726 case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP: 727 return X86::BP; 728 case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP: 729 return X86::SP; 730 case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8: 731 return X86::R8W; 732 case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9: 733 return X86::R9W; 734 case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10: 735 return X86::R10W; 736 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11: 737 return X86::R11W; 738 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12: 739 return X86::R12W; 740 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13: 741 return 
X86::R13W; 742 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14: 743 return X86::R14W; 744 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: 745 return X86::R15W; 746 } 747 case 32: 748 switch (Reg.id()) { 749 default: return X86::NoRegister; 750 case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: 751 return X86::EAX; 752 case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: 753 return X86::EDX; 754 case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX: 755 return X86::ECX; 756 case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX: 757 return X86::EBX; 758 case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI: 759 return X86::ESI; 760 case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI: 761 return X86::EDI; 762 case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP: 763 return X86::EBP; 764 case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP: 765 return X86::ESP; 766 case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8: 767 return X86::R8D; 768 case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9: 769 return X86::R9D; 770 case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10: 771 return X86::R10D; 772 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11: 773 return X86::R11D; 774 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12: 775 return X86::R12D; 776 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13: 777 return X86::R13D; 778 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14: 779 return X86::R14D; 780 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: 781 return X86::R15D; 782 } 783 case 64: 784 switch (Reg.id()) { 785 default: return 0; 786 case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: 787 return X86::RAX; 788 case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: 789 return X86::RDX; 790 case X86::CH: case X86::CL: case 
X86::CX: case X86::ECX: case X86::RCX: 791 return X86::RCX; 792 case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX: 793 return X86::RBX; 794 case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI: 795 return X86::RSI; 796 case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI: 797 return X86::RDI; 798 case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP: 799 return X86::RBP; 800 case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP: 801 return X86::RSP; 802 case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8: 803 return X86::R8; 804 case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9: 805 return X86::R9; 806 case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10: 807 return X86::R10; 808 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11: 809 return X86::R11; 810 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12: 811 return X86::R12; 812 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13: 813 return X86::R13; 814 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14: 815 return X86::R14; 816 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: 817 return X86::R15; 818 } 819 } 820 } 821 822 MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High) { 823 MCRegister Res = getX86SubSuperRegisterOrZero(Reg, Size, High); 824 assert(Res != X86::NoRegister && "Unexpected register or VT"); 825 return Res; 826 } 827 828 829