//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86ATTInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86InstComments.h"
#include "MCTargetDesc/X86TargetStreamer.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86AsmPrinter.h"
#include "X86RegisterInfo.h"
#include "X86ShuffleDecodeConstantPool.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Target/TargetLoweringObjectFile.h"

using namespace llvm;

namespace {

/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
class X86MCInstLower {
  MCContext &Ctx;
  const MachineFunction &MF;
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter;

public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
                                          const MachineOperand &MO) const;
  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
};

} // end anonymous namespace

// Emit a minimal sequence of nops spanning NumBytes bytes.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI);

void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
                                                 const MCSubtargetInfo &STI,
                                                 MCCodeEmitter *CodeEmitter) {
  if (InShadow) {
    SmallString<256> Code;
    SmallVector<MCFixup, 4> Fixups;
    raw_svector_ostream VecOS(Code);
    CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI);
    CurrentShadowSize += Code.size();
    if (CurrentShadowSize >= RequiredShadowSize)
      InShadow = false; // The shadow is big enough. Stop counting.
  }
}

void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
    MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
  if (InShadow && CurrentShadowSize < RequiredShadowSize) {
    InShadow = false;
    EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
             MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
  }
}

void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
  OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
  SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get());
}

X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
    : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()),
      AsmPrinter(asmprinter) {}

MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}

/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const {
  const DataLayout &DL = MF.getDataLayout();
  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
         "Isn't a symbol reference");

  MCSymbol *Sym = nullptr;
  SmallString<128> Name;
  StringRef Suffix;

  switch (MO.getTargetFlags()) {
  case X86II::MO_DLLIMPORT:
    // Handle dllimport linkage.
    Name += "__imp_";
    break;
  case X86II::MO_COFFSTUB:
    Name += ".refptr.";
    break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Suffix = "$non_lazy_ptr";
    break;
  }

  if (!Suffix.empty())
    Name += DL.getPrivateGlobalPrefix();

  if (MO.isGlobal()) {
    const GlobalValue *GV = MO.getGlobal();
    AsmPrinter.getNameWithPrefix(Name, GV);
  } else if (MO.isSymbol()) {
    Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL);
  } else if (MO.isMBB()) {
    assert(Suffix.empty());
    Sym = MO.getMBB()->getSymbol();
  }

  Name += Suffix;
  if (!Sym)
    Sym = Ctx.getOrCreateSymbol(Name);

  // If the target flags on the operand change the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default:
    break;
  case X86II::MO_COFFSTUB: {
    MachineModuleInfoCOFF &MMICOFF =
        MF.getMMI().getObjFileInfo<MachineModuleInfoCOFF>();
    MachineModuleInfoImpl::StubValueTy &StubSym = MMICOFF.getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym = MachineModuleInfoImpl::StubValueTy(
          AsmPrinter.getSymbol(MO.getGlobal()), true);
    }
    break;
  }
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    MachineModuleInfoImpl::StubValueTy &StubSym =
        getMachOMMI().getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym = MachineModuleInfoImpl::StubValueTy(
          AsmPrinter.getSymbol(MO.getGlobal()),
          !MO.getGlobal()->hasInternalLinkage());
    }
    break;
  }
  }

  return Sym;
}

MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
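  // Most target flags map one-to-one onto an MCSymbolRefExpr variant kind
  // below (e.g. MO_GOTPCREL becomes a "sym@GOTPCREL" reference); the
  // PIC-base flags instead build an explicit "sym - picbase" MCBinaryExpr.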
  const MCExpr *Expr = nullptr;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  switch (MO.getTargetFlags()) {
  default:
    llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG: // No flag.
  // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
  case X86II::MO_COFFSTUB:
    break;

  case X86II::MO_TLVP:
    RefKind = MCSymbolRefExpr::VK_TLVP;
    break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(
        Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx);
    break;
  case X86II::MO_SECREL:
    RefKind = MCSymbolRefExpr::VK_SECREL;
    break;
  case X86II::MO_TLSGD:
    RefKind = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86II::MO_TLSLD:
    RefKind = MCSymbolRefExpr::VK_TLSLD;
    break;
  case X86II::MO_TLSLDM:
    RefKind = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86II::MO_GOTTPOFF:
    RefKind = MCSymbolRefExpr::VK_GOTTPOFF;
    break;
  case X86II::MO_INDNTPOFF:
    RefKind = MCSymbolRefExpr::VK_INDNTPOFF;
    break;
  case X86II::MO_TPOFF:
    RefKind = MCSymbolRefExpr::VK_TPOFF;
    break;
  case X86II::MO_DTPOFF:
    RefKind = MCSymbolRefExpr::VK_DTPOFF;
    break;
  case X86II::MO_NTPOFF:
    RefKind = MCSymbolRefExpr::VK_NTPOFF;
    break;
  case X86II::MO_GOTNTPOFF:
    RefKind = MCSymbolRefExpr::VK_GOTNTPOFF;
    break;
  case X86II::MO_GOTPCREL:
    RefKind = MCSymbolRefExpr::VK_GOTPCREL;
    break;
  case X86II::MO_GOT:
    RefKind = MCSymbolRefExpr::VK_GOT;
    break;
  case X86II::MO_GOTOFF:
    RefKind = MCSymbolRefExpr::VK_GOTOFF;
    break;
  case X86II::MO_PLT:
    RefKind = MCSymbolRefExpr::VK_PLT;
    break;
  case X86II::MO_ABS8:
    RefKind = MCSymbolRefExpr::VK_X86_ABS8;
    break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(
        Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx);
    if (MO.isJTI()) {
      assert(MAI.doesSetDirectiveSuppressReloc());
      // If .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section so we are restricting it to jumptable references.
      MCSymbol *Label = Ctx.createTempSymbol();
      AsmPrinter.OutStreamer->EmitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::create(Label, Ctx);
    }
    break;
  }

  if (!Expr)
    Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx);

  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    Expr = MCBinaryExpr::createAdd(
        Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
  return MCOperand::createExpr(Expr);
}

/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions with
/// a short fixed-register form.
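/// For example, "addl $42, %eax" can be re-encoded as ADD32i32 (opcode 0x05
/// followed by an imm32), dropping the ModRM byte of the generic ri form.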
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
  unsigned ImmOp = Inst.getNumOperands() - 1;
  assert(Inst.getOperand(0).isReg() &&
         (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
         ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
           Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
          Inst.getNumOperands() == 2) &&
         "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(0).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(ImmOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}

/// If a movsx instruction has a shorter encoding for the used register,
/// simplify the instruction to use it instead.
static void SimplifyMOVSX(MCInst &Inst) {
  unsigned NewOpcode = 0;
  unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg();
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction!");
  case X86::MOVSX16rr8: // movsbw %al, %ax --> cbtw
    if (Op0 == X86::AX && Op1 == X86::AL)
      NewOpcode = X86::CBW;
    break;
  case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl
    if (Op0 == X86::EAX && Op1 == X86::AX)
      NewOpcode = X86::CWDE;
    break;
  case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq
    if (Op0 == X86::RAX && Op1 == X86::EAX)
      NewOpcode = X86::CDQE;
    break;
  }

  if (NewOpcode != 0) {
    Inst = MCInst();
    Inst.setOpcode(NewOpcode);
  }
}

/// Simplify things like MOV32rm to MOV32o32a.
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (Printer.getSubtarget().is64Bit())
    return;

  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  assert(
      Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
      Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&
      Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&
      Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&
      Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&
      (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) &&
      "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(RegOp).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
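  // The short forms encode a bare moffs32 displacement with no ModRM byte, so
  // they can only express "segment:disp"; any base or index register, or a
  // scale other than 1, rules the rewrite out below.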
  bool Absolute = true;
  if (Inst.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }

  if (Absolute &&
      (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
       Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
       Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(AddrOp);
  MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
  Inst.addOperand(Seg);
}

static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
  return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
}

Optional<MCOperand>
X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
                                    const MachineOperand &MO) const {
  switch (MO.getType()) {
  default:
    MI->print(errs());
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return None;
    return MCOperand::createReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::createImm(MO.getImm());
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
    return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
  case MachineOperand::MO_MCSymbol:
    return LowerSymbolOperand(MO, MO.getMCSymbol());
  case MachineOperand::MO_JumpTableIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
  case MachineOperand::MO_ConstantPoolIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
  case MachineOperand::MO_BlockAddress:
    return LowerSymbolOperand(
        MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return None;
  }
}

// Replace TAILJMP opcodes with their equivalent opcodes that have encoding
// information.
static unsigned convertTailJumpOpcode(unsigned Opcode) {
  switch (Opcode) {
  case X86::TAILJMPr:
    Opcode = X86::JMP32r;
    break;
  case X86::TAILJMPm:
    Opcode = X86::JMP32m;
    break;
  case X86::TAILJMPr64:
    Opcode = X86::JMP64r;
    break;
  case X86::TAILJMPm64:
    Opcode = X86::JMP64m;
    break;
  case X86::TAILJMPr64_REX:
    Opcode = X86::JMP64r_REX;
    break;
  case X86::TAILJMPm64_REX:
    Opcode = X86::JMP64m_REX;
    break;
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    Opcode = X86::JMP_1;
    break;
  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
    Opcode = X86::JCC_1;
    break;
  }

  return Opcode;
}

void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
      OutMI.addOperand(MaybeMCOp.getValue());

  // Handle a few special cases to eliminate operand modifiers.
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
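    // The memory reference occupies the X86::AddrNumOperands (5) operands
    // after the destination: base, scale, index, displacement, segment.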
    assert(OutMI.getNumOperands() == 1 + X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of
  // VEX.B if one of the registers is extended, but the other isn't.
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
      case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
      case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
      case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
      case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
      case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
      case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
      case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  case X86::VPCMPBZ128rmi:  case X86::VPCMPBZ128rmik:
  case X86::VPCMPBZ128rri:  case X86::VPCMPBZ128rrik:
  case X86::VPCMPBZ256rmi:  case X86::VPCMPBZ256rmik:
  case X86::VPCMPBZ256rri:  case X86::VPCMPBZ256rrik:
  case X86::VPCMPBZrmi:     case X86::VPCMPBZrmik:
  case X86::VPCMPBZrri:     case X86::VPCMPBZrrik:
  case X86::VPCMPDZ128rmi:  case X86::VPCMPDZ128rmik:
  case X86::VPCMPDZ128rmib: case X86::VPCMPDZ128rmibk:
  case X86::VPCMPDZ128rri:  case X86::VPCMPDZ128rrik:
  case X86::VPCMPDZ256rmi:  case X86::VPCMPDZ256rmik:
  case X86::VPCMPDZ256rmib: case X86::VPCMPDZ256rmibk:
  case X86::VPCMPDZ256rri:  case X86::VPCMPDZ256rrik:
  case X86::VPCMPDZrmi:     case X86::VPCMPDZrmik:
  case X86::VPCMPDZrmib:    case X86::VPCMPDZrmibk:
  case X86::VPCMPDZrri:     case X86::VPCMPDZrrik:
  case X86::VPCMPQZ128rmi:  case X86::VPCMPQZ128rmik:
  case X86::VPCMPQZ128rmib: case X86::VPCMPQZ128rmibk:
  case X86::VPCMPQZ128rri:  case X86::VPCMPQZ128rrik:
  case X86::VPCMPQZ256rmi:  case X86::VPCMPQZ256rmik:
  case X86::VPCMPQZ256rmib: case X86::VPCMPQZ256rmibk:
  case X86::VPCMPQZ256rri:  case X86::VPCMPQZ256rrik:
  case X86::VPCMPQZrmi:     case X86::VPCMPQZrmik:
  case X86::VPCMPQZrmib:    case X86::VPCMPQZrmibk:
  case X86::VPCMPQZrri:
  case X86::VPCMPQZrrik:
  case X86::VPCMPWZ128rmi:  case X86::VPCMPWZ128rmik:
  case X86::VPCMPWZ128rri:  case X86::VPCMPWZ128rrik:
  case X86::VPCMPWZ256rmi:  case X86::VPCMPWZ256rmik:
  case X86::VPCMPWZ256rri:  case X86::VPCMPWZ256rrik:
  case X86::VPCMPWZrmi:     case X86::VPCMPWZrmik:
  case X86::VPCMPWZrri:     case X86::VPCMPWZrrik: {
    // Turn immediate 0 into the VPCMPEQ instruction.
    if (OutMI.getOperand(OutMI.getNumOperands() - 1).getImm() == 0) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      case X86::VPCMPBZ128rmi:   NewOpc = X86::VPCMPEQBZ128rm;   break;
      case X86::VPCMPBZ128rmik:  NewOpc = X86::VPCMPEQBZ128rmk;  break;
      case X86::VPCMPBZ128rri:   NewOpc = X86::VPCMPEQBZ128rr;   break;
      case X86::VPCMPBZ128rrik:  NewOpc = X86::VPCMPEQBZ128rrk;  break;
      case X86::VPCMPBZ256rmi:   NewOpc = X86::VPCMPEQBZ256rm;   break;
      case X86::VPCMPBZ256rmik:  NewOpc = X86::VPCMPEQBZ256rmk;  break;
      case X86::VPCMPBZ256rri:   NewOpc = X86::VPCMPEQBZ256rr;   break;
      case X86::VPCMPBZ256rrik:  NewOpc = X86::VPCMPEQBZ256rrk;  break;
      case X86::VPCMPBZrmi:      NewOpc = X86::VPCMPEQBZrm;      break;
      case X86::VPCMPBZrmik:     NewOpc = X86::VPCMPEQBZrmk;     break;
      case X86::VPCMPBZrri:      NewOpc = X86::VPCMPEQBZrr;      break;
      case X86::VPCMPBZrrik:     NewOpc = X86::VPCMPEQBZrrk;     break;
      case X86::VPCMPDZ128rmi:   NewOpc = X86::VPCMPEQDZ128rm;   break;
      case X86::VPCMPDZ128rmib:  NewOpc = X86::VPCMPEQDZ128rmb;  break;
      case X86::VPCMPDZ128rmibk: NewOpc = X86::VPCMPEQDZ128rmbk; break;
      case X86::VPCMPDZ128rmik:  NewOpc = X86::VPCMPEQDZ128rmk;  break;
      case X86::VPCMPDZ128rri:   NewOpc = X86::VPCMPEQDZ128rr;   break;
      case X86::VPCMPDZ128rrik:  NewOpc = X86::VPCMPEQDZ128rrk;  break;
      case X86::VPCMPDZ256rmi:   NewOpc = X86::VPCMPEQDZ256rm;   break;
      case X86::VPCMPDZ256rmib:  NewOpc = X86::VPCMPEQDZ256rmb;  break;
      case X86::VPCMPDZ256rmibk: NewOpc = X86::VPCMPEQDZ256rmbk; break;
      case X86::VPCMPDZ256rmik:  NewOpc = X86::VPCMPEQDZ256rmk;  break;
      case X86::VPCMPDZ256rri:   NewOpc = X86::VPCMPEQDZ256rr;   break;
      case X86::VPCMPDZ256rrik:  NewOpc = X86::VPCMPEQDZ256rrk;  break;
      case X86::VPCMPDZrmi:      NewOpc = X86::VPCMPEQDZrm;      break;
      case X86::VPCMPDZrmib:     NewOpc = X86::VPCMPEQDZrmb;     break;
      case X86::VPCMPDZrmibk:    NewOpc = X86::VPCMPEQDZrmbk;    break;
      case X86::VPCMPDZrmik:     NewOpc = X86::VPCMPEQDZrmk;     break;
      case X86::VPCMPDZrri:      NewOpc = X86::VPCMPEQDZrr;      break;
      case X86::VPCMPDZrrik:     NewOpc = X86::VPCMPEQDZrrk;     break;
      case X86::VPCMPQZ128rmi:   NewOpc = X86::VPCMPEQQZ128rm;   break;
      case X86::VPCMPQZ128rmib:  NewOpc = X86::VPCMPEQQZ128rmb;  break;
      case X86::VPCMPQZ128rmibk: NewOpc = X86::VPCMPEQQZ128rmbk; break;
      case X86::VPCMPQZ128rmik:  NewOpc = X86::VPCMPEQQZ128rmk;  break;
      case X86::VPCMPQZ128rri:   NewOpc = X86::VPCMPEQQZ128rr;   break;
      case X86::VPCMPQZ128rrik:  NewOpc = X86::VPCMPEQQZ128rrk;  break;
      case X86::VPCMPQZ256rmi:   NewOpc = X86::VPCMPEQQZ256rm;   break;
      case X86::VPCMPQZ256rmib:  NewOpc = X86::VPCMPEQQZ256rmb;  break;
      case X86::VPCMPQZ256rmibk: NewOpc = X86::VPCMPEQQZ256rmbk; break;
      case X86::VPCMPQZ256rmik:  NewOpc = X86::VPCMPEQQZ256rmk;  break;
      case X86::VPCMPQZ256rri:   NewOpc = X86::VPCMPEQQZ256rr;   break;
      case X86::VPCMPQZ256rrik:  NewOpc = X86::VPCMPEQQZ256rrk;  break;
      case X86::VPCMPQZrmi:      NewOpc = X86::VPCMPEQQZrm;      break;
      case X86::VPCMPQZrmib:     NewOpc = X86::VPCMPEQQZrmb;     break;
      case X86::VPCMPQZrmibk:    NewOpc = X86::VPCMPEQQZrmbk;    break;
      case X86::VPCMPQZrmik:     NewOpc = X86::VPCMPEQQZrmk;     break;
      case X86::VPCMPQZrri:      NewOpc = X86::VPCMPEQQZrr;      break;
      case X86::VPCMPQZrrik:     NewOpc = X86::VPCMPEQQZrrk;     break;
      case X86::VPCMPWZ128rmi:   NewOpc = X86::VPCMPEQWZ128rm;   break;
      case X86::VPCMPWZ128rmik:  NewOpc = X86::VPCMPEQWZ128rmk;  break;
      case X86::VPCMPWZ128rri:   NewOpc = X86::VPCMPEQWZ128rr;   break;
      case X86::VPCMPWZ128rrik:  NewOpc = X86::VPCMPEQWZ128rrk;  break;
      case X86::VPCMPWZ256rmi:   NewOpc = X86::VPCMPEQWZ256rm;   break;
      case X86::VPCMPWZ256rmik:  NewOpc = X86::VPCMPEQWZ256rmk;  break;
      case X86::VPCMPWZ256rri:   NewOpc = X86::VPCMPEQWZ256rr;   break;
      case X86::VPCMPWZ256rrik:  NewOpc = X86::VPCMPEQWZ256rrk;  break;
      case X86::VPCMPWZrmi:      NewOpc = X86::VPCMPEQWZrm;      break;
      case X86::VPCMPWZrmik:     NewOpc = X86::VPCMPEQWZrmk;     break;
      case X86::VPCMPWZrri:      NewOpc = X86::VPCMPEQWZrr;      break;
      case X86::VPCMPWZrrik:     NewOpc = X86::VPCMPEQWZrrk;     break;
      }

      OutMI.setOpcode(NewOpc);
      OutMI.erase(&OutMI.getOperand(OutMI.getNumOperands() - 1));
      break;
    }

    // Turn immediate 6 into the VPCMPGT instruction.
    if (OutMI.getOperand(OutMI.getNumOperands() - 1).getImm() == 6) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      case X86::VPCMPBZ128rmi:   NewOpc = X86::VPCMPGTBZ128rm;   break;
      case X86::VPCMPBZ128rmik:  NewOpc = X86::VPCMPGTBZ128rmk;  break;
      case X86::VPCMPBZ128rri:   NewOpc = X86::VPCMPGTBZ128rr;   break;
      case X86::VPCMPBZ128rrik:  NewOpc = X86::VPCMPGTBZ128rrk;  break;
      case X86::VPCMPBZ256rmi:   NewOpc = X86::VPCMPGTBZ256rm;   break;
      case X86::VPCMPBZ256rmik:  NewOpc = X86::VPCMPGTBZ256rmk;  break;
      case X86::VPCMPBZ256rri:   NewOpc = X86::VPCMPGTBZ256rr;   break;
      case X86::VPCMPBZ256rrik:  NewOpc = X86::VPCMPGTBZ256rrk;  break;
      case X86::VPCMPBZrmi:      NewOpc = X86::VPCMPGTBZrm;      break;
      case X86::VPCMPBZrmik:     NewOpc = X86::VPCMPGTBZrmk;     break;
      case X86::VPCMPBZrri:      NewOpc = X86::VPCMPGTBZrr;      break;
      case X86::VPCMPBZrrik:     NewOpc = X86::VPCMPGTBZrrk;     break;
      case X86::VPCMPDZ128rmi:   NewOpc = X86::VPCMPGTDZ128rm;   break;
      case X86::VPCMPDZ128rmib:  NewOpc = X86::VPCMPGTDZ128rmb;  break;
      case X86::VPCMPDZ128rmibk: NewOpc = X86::VPCMPGTDZ128rmbk; break;
      case X86::VPCMPDZ128rmik:  NewOpc = X86::VPCMPGTDZ128rmk;  break;
      case X86::VPCMPDZ128rri:   NewOpc = X86::VPCMPGTDZ128rr;   break;
      case X86::VPCMPDZ128rrik:  NewOpc = X86::VPCMPGTDZ128rrk;  break;
      case X86::VPCMPDZ256rmi:   NewOpc = X86::VPCMPGTDZ256rm;   break;
      case X86::VPCMPDZ256rmib:  NewOpc = X86::VPCMPGTDZ256rmb;  break;
      case X86::VPCMPDZ256rmibk: NewOpc = X86::VPCMPGTDZ256rmbk; break;
      case X86::VPCMPDZ256rmik:  NewOpc = X86::VPCMPGTDZ256rmk;  break;
      case X86::VPCMPDZ256rri:   NewOpc = X86::VPCMPGTDZ256rr;   break;
      case X86::VPCMPDZ256rrik:  NewOpc = X86::VPCMPGTDZ256rrk;  break;
      case X86::VPCMPDZrmi:      NewOpc = X86::VPCMPGTDZrm;      break;
      case X86::VPCMPDZrmib:     NewOpc = X86::VPCMPGTDZrmb;     break;
      case X86::VPCMPDZrmibk:    NewOpc = X86::VPCMPGTDZrmbk;    break;
      case X86::VPCMPDZrmik:     NewOpc = X86::VPCMPGTDZrmk;     break;
      case X86::VPCMPDZrri:      NewOpc = X86::VPCMPGTDZrr;      break;
      case X86::VPCMPDZrrik:     NewOpc = X86::VPCMPGTDZrrk;     break;
      case X86::VPCMPQZ128rmi:   NewOpc = X86::VPCMPGTQZ128rm;   break;
      case X86::VPCMPQZ128rmib:  NewOpc = X86::VPCMPGTQZ128rmb;  break;
      case X86::VPCMPQZ128rmibk: NewOpc = X86::VPCMPGTQZ128rmbk; break;
      case X86::VPCMPQZ128rmik:  NewOpc = X86::VPCMPGTQZ128rmk;  break;
      case X86::VPCMPQZ128rri:   NewOpc = X86::VPCMPGTQZ128rr;   break;
      case X86::VPCMPQZ128rrik:  NewOpc = X86::VPCMPGTQZ128rrk;  break;
      case X86::VPCMPQZ256rmi:   NewOpc = X86::VPCMPGTQZ256rm;   break;
      case X86::VPCMPQZ256rmib:  NewOpc = X86::VPCMPGTQZ256rmb;  break;
      case X86::VPCMPQZ256rmibk: NewOpc = X86::VPCMPGTQZ256rmbk; break;
      case X86::VPCMPQZ256rmik:  NewOpc = X86::VPCMPGTQZ256rmk;  break;
      case X86::VPCMPQZ256rri:   NewOpc = X86::VPCMPGTQZ256rr;   break;
      case X86::VPCMPQZ256rrik:  NewOpc = X86::VPCMPGTQZ256rrk;  break;
      case X86::VPCMPQZrmi:      NewOpc = X86::VPCMPGTQZrm;      break;
      case X86::VPCMPQZrmib:     NewOpc = X86::VPCMPGTQZrmb;     break;
      case X86::VPCMPQZrmibk:    NewOpc = X86::VPCMPGTQZrmbk;    break;
      case X86::VPCMPQZrmik:     NewOpc = X86::VPCMPGTQZrmk;     break;
      case X86::VPCMPQZrri:      NewOpc = X86::VPCMPGTQZrr;      break;
      case X86::VPCMPQZrrik:     NewOpc = X86::VPCMPGTQZrrk;     break;
      case X86::VPCMPWZ128rmi:   NewOpc = X86::VPCMPGTWZ128rm;   break;
      case X86::VPCMPWZ128rmik:  NewOpc = X86::VPCMPGTWZ128rmk;  break;
      case X86::VPCMPWZ128rri:   NewOpc = X86::VPCMPGTWZ128rr;   break;
      case X86::VPCMPWZ128rrik:  NewOpc = X86::VPCMPGTWZ128rrk;  break;
      case X86::VPCMPWZ256rmi:   NewOpc = X86::VPCMPGTWZ256rm;   break;
      case X86::VPCMPWZ256rmik:  NewOpc = X86::VPCMPGTWZ256rmk;  break;
      case X86::VPCMPWZ256rri:   NewOpc = X86::VPCMPGTWZ256rr;   break;
      case X86::VPCMPWZ256rrik:  NewOpc = X86::VPCMPGTWZ256rrk;  break;
      case X86::VPCMPWZrmi:      NewOpc = X86::VPCMPGTWZrm;      break;
      case X86::VPCMPWZrmik:     NewOpc = X86::VPCMPGTWZrmk;     break;
      case X86::VPCMPWZrri:      NewOpc = X86::VPCMPGTWZrr;      break;
      case X86::VPCMPWZrrik:     NewOpc = X86::VPCMPGTWZrrk;     break;
      }

      OutMI.setOpcode(NewOpc);
      OutMI.erase(&OutMI.getOperand(OutMI.getNumOperands() - 1));
      break;
    }

    break;
  }

  // CALL64r, CALL64pcrel32 - These instructions used to have register inputs
  // modeled as normal uses instead of implicit uses. As such, we used to
  // truncate off all but the first operand (the callee). This issue seems to
  // have been fixed at some point. This assert verifies that.
  case X86::CALL64r:
  case X86::CALL64pcrel32:
    assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
    break;

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CLEANUPRET: {
    // Replace CLEANUPRET with the appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CATCHRET: {
    // Replace CATCHRET with the appropriate RET.
    const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
    unsigned ReturnReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(Subtarget));
    OutMI.addOperand(MCOperand::createReg(ReturnReg));
    break;
  }

  // TAILJMPd, TAILJMPd64, TAILJMPd_CC - Lower to the correct jump
  // instruction.
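  // The conditional forms (TAILJMPd_CC/TAILJMPd64_CC) carry the condition
  // code as an extra operand, hence the differing operand-count asserts.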
  case X86::TAILJMPr:
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands!");
    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
    break;

  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC:
    assert(OutMI.getNumOperands() == 2 && "Unexpected number of operands!");
    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
    break;

  case X86::TAILJMPm:
  case X86::TAILJMPm64:
  case X86::TAILJMPm64_REX:
    assert(OutMI.getNumOperands() == X86::AddrNumOperands &&
           "Unexpected number of operands!");
    OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
    break;

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:
  case X86::MOV16mr:
  case X86::MOV16rm:
  case X86::MOV32mr:
  case X86::MOV32rm: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::MOV8mr_NOREX:
    case X86::MOV8mr:  NewOpc = X86::MOV8o32a;  break;
    case X86::MOV8rm_NOREX:
    case X86::MOV8rm:  NewOpc = X86::MOV8ao32;  break;
    case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
    case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
    case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
    case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
    }
    SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
    break;
  }

  case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
  case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
  case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
  case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
  case X86::OR8ri:  case X86::OR16ri:  case X86::OR32ri:  case X86::OR64ri32:
  case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
  case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
  case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
  case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::ADC8ri:    NewOpc = X86::ADC8i8;   break;
    case X86::ADC16ri:   NewOpc = X86::ADC16i16; break;
    case X86::ADC32ri:   NewOpc = X86::ADC32i32; break;
    case X86::ADC64ri32: NewOpc = X86::ADC64i32; break;
    case X86::ADD8ri:    NewOpc = X86::ADD8i8;   break;
    case X86::ADD16ri:   NewOpc = X86::ADD16i16; break;
    case X86::ADD32ri:   NewOpc = X86::ADD32i32; break;
    case X86::ADD64ri32: NewOpc = X86::ADD64i32; break;
    case X86::AND8ri:    NewOpc = X86::AND8i8;   break;
    case X86::AND16ri:   NewOpc = X86::AND16i16; break;
    case X86::AND32ri:   NewOpc = X86::AND32i32; break;
    case X86::AND64ri32: NewOpc = X86::AND64i32; break;
    case X86::CMP8ri:    NewOpc = X86::CMP8i8;   break;
    case X86::CMP16ri:   NewOpc = X86::CMP16i16; break;
    case X86::CMP32ri:   NewOpc = X86::CMP32i32; break;
    case X86::CMP64ri32: NewOpc = X86::CMP64i32; break;
    case X86::OR8ri:     NewOpc = X86::OR8i8;    break;
    case X86::OR16ri:    NewOpc = X86::OR16i16;  break;
    case X86::OR32ri:    NewOpc = X86::OR32i32;  break;
    case X86::OR64ri32:  NewOpc = X86::OR64i32;  break;
    case X86::SBB8ri:    NewOpc = X86::SBB8i8;   break;
    case X86::SBB16ri:   NewOpc = X86::SBB16i16; break;
    case X86::SBB32ri:   NewOpc = X86::SBB32i32; break;
    case X86::SBB64ri32: NewOpc = X86::SBB64i32; break;
    case X86::SUB8ri:    NewOpc = X86::SUB8i8;   break;
    case X86::SUB16ri:   NewOpc = X86::SUB16i16; break;
    case X86::SUB32ri:   NewOpc = X86::SUB32i32; break;
    case X86::SUB64ri32: NewOpc = X86::SUB64i32; break;
    case X86::TEST8ri:   NewOpc = X86::TEST8i8;  break;
    case X86::TEST16ri:  NewOpc = X86::TEST16i16; break;
    case X86::TEST32ri:  NewOpc = X86::TEST32i32; break;
    case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
    case X86::XOR8ri:    NewOpc = X86::XOR8i8;   break;
    case X86::XOR16ri:   NewOpc = X86::XOR16i16; break;
    case X86::XOR32ri:   NewOpc = X86::XOR32i32; break;
    case X86::XOR64ri32: NewOpc = X86::XOR64i32; break;
    }
    SimplifyShortImmForm(OutMI, NewOpc);
    break;
  }

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}

void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {
  bool Is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;
  MCContext &Ctx = OutStreamer->getContext();

  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  const MCSymbolRefExpr *Sym = MCSymbolRefExpr::create(
      MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)), SRVK, Ctx);

  // As of binutils 2.32, ld has a bogus TLS relaxation error when a GD/LD
  // code sequence using R_X86_64_GOTPCREL (instead of R_X86_64_GOTPCRELX) is
  // relaxed to IE/LE (binutils PR24784). Work around the bug by only using
  // GOT when GOTPCRELX is enabled.
  // TODO: Delete the workaround when GOTPCRELX becomes commonplace.
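  // For reference, the canonical padded 64-bit general-dynamic sequence from
  // the psABI is:
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@PLT
  // The prefix padding keeps the sequence at a fixed 16 bytes so the linker
  // can rewrite it in place when relaxing to initial-exec or local-exec.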
  bool UseGot = MMI->getModule()->getRtLibUseGOT() &&
                Ctx.getAsmInfo()->canRelaxRelocations();

  if (Is64Bits) {
    bool NeedsPadding = SRVK == MCSymbolRefExpr::VK_TLSGD;
    if (NeedsPadding)
      EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::LEA64r)
                                .addReg(X86::RDI)
                                .addReg(X86::RIP)
                                .addImm(1)
                                .addReg(0)
                                .addExpr(Sym)
                                .addReg(0));
    const MCSymbol *TlsGetAddr = Ctx.getOrCreateSymbol("__tls_get_addr");
    if (NeedsPadding) {
      if (!UseGot)
        EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
      EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
      EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
    }
    if (UseGot) {
      const MCExpr *Expr = MCSymbolRefExpr::create(
          TlsGetAddr, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
      EmitAndCountInstruction(MCInstBuilder(X86::CALL64m)
                                  .addReg(X86::RIP)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Expr)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(
          MCInstBuilder(X86::CALL64pcrel32)
              .addExpr(MCSymbolRefExpr::create(TlsGetAddr,
                                               MCSymbolRefExpr::VK_PLT, Ctx)));
    }
  } else {
    if (SRVK == MCSymbolRefExpr::VK_TLSGD && !UseGot) {
      EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
                                  .addReg(X86::EAX)
                                  .addReg(0)
                                  .addImm(1)
                                  .addReg(X86::EBX)
                                  .addExpr(Sym)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
                                  .addReg(X86::EAX)
                                  .addReg(X86::EBX)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Sym)
                                  .addReg(0));
    }

    const MCSymbol *TlsGetAddr = Ctx.getOrCreateSymbol("___tls_get_addr");
    if (UseGot) {
      const MCExpr *Expr =
          MCSymbolRefExpr::create(TlsGetAddr, MCSymbolRefExpr::VK_GOT, Ctx);
      EmitAndCountInstruction(MCInstBuilder(X86::CALL32m)
                                  .addReg(X86::EBX)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Expr)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(
          MCInstBuilder(X86::CALLpcrel32)
              .addExpr(MCSymbolRefExpr::create(TlsGetAddr,
                                               MCSymbolRefExpr::VK_PLT, Ctx)));
    }
  }
}

/// Emit the largest nop instruction smaller than or equal to \p NumBytes
/// bytes. Return the size of the nop emitted.
static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                        const MCSubtargetInfo &STI) {
  // This works only for 64bit. For 32bit we have to do additional checking if
  // the CPU supports multi-byte nops.
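  // The multi-byte nops chosen below are the recommended 0F 1F /0 (NOPL/NOPW)
  // encodings; 0x66 operand-size prefixes are layered on afterwards to
  // stretch a single nop past 10 bytes.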
  assert(Is64Bit && "EmitNops only supports X86-64");

  unsigned NopSize;
  unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
  IndexReg = Displacement = SegmentReg = 0;
  BaseReg = X86::RAX;
  ScaleVal = 1;
  switch (NumBytes) {
  case 0:
    llvm_unreachable("Zero nops?");
    break;
  case 1:
    NopSize = 1;
    Opc = X86::NOOP;
    break;
  case 2:
    NopSize = 2;
    Opc = X86::XCHG16ar;
    break;
  case 3:
    NopSize = 3;
    Opc = X86::NOOPL;
    break;
  case 4:
    NopSize = 4;
    Opc = X86::NOOPL;
    Displacement = 8;
    break;
  case 5:
    NopSize = 5;
    Opc = X86::NOOPL;
    Displacement = 8;
    IndexReg = X86::RAX;
    break;
  case 6:
    NopSize = 6;
    Opc = X86::NOOPW;
    Displacement = 8;
    IndexReg = X86::RAX;
    break;
  case 7:
    NopSize = 7;
    Opc = X86::NOOPL;
    Displacement = 512;
    break;
  case 8:
    NopSize = 8;
    Opc = X86::NOOPL;
    Displacement = 512;
    IndexReg = X86::RAX;
    break;
  case 9:
    NopSize = 9;
    Opc = X86::NOOPW;
    Displacement = 512;
    IndexReg = X86::RAX;
    break;
  default:
    NopSize = 10;
    Opc = X86::NOOPW;
    Displacement = 512;
    IndexReg = X86::RAX;
    SegmentReg = X86::CS;
    break;
  }

  unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
  NopSize += NumPrefixes;
  for (unsigned i = 0; i != NumPrefixes; ++i)
    OS.EmitBytes("\x66");

  switch (Opc) {
  default: llvm_unreachable("Unexpected opcode");
  case X86::NOOP:
    OS.EmitInstruction(MCInstBuilder(Opc), STI);
    break;
  case X86::XCHG16ar:
    OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX).addReg(X86::AX), STI);
    break;
  case X86::NOOPL:
  case X86::NOOPW:
    OS.EmitInstruction(MCInstBuilder(Opc)
                           .addReg(BaseReg)
                           .addImm(ScaleVal)
                           .addReg(IndexReg)
                           .addImm(Displacement)
                           .addReg(SegmentReg),
                       STI);
    break;
  }
  assert(NopSize <= NumBytes && "We overemitted?");
  return NopSize;
}

/// Emit the optimal amount of multi-byte nops on X86.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI) {
  unsigned NopsToEmit = NumBytes;
  (void)NopsToEmit;
  while (NumBytes) {
    NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI);
    assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
  }
}

void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");

  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(),
             getSubtargetInfo());
  } else {
    // Lower the call target and choose the correct opcode.
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address. You'll fail asserts during load & relocation if this
      // symbol is too far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate. You'll fail asserts during load & relocation if this
      // address is too far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      // FIXME: Add retpoline support and remove this.
      if (Subtarget->useRetpolineIndirectCalls())
        report_fatal_error("Lowering register statepoints with retpoline not "
                           "yet implemented.");
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    // Emit the call.
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->EmitInstruction(CallInst, getSubtargetInfo());
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT.
  SM.recordStatepoint(MI);
}

void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
                                     X86MCInstLower &MCIL) {
  // FAULTING_LOAD_OP <def>, <faulting type>, <MBB handler>,
  //                  <opcode>, <operands>

  Register DefRegister = FaultingMI.getOperand(0).getReg();
  FaultMaps::FaultKind FK =
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  unsigned OperandsBeginIdx = 4;

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, HandlerLabel);

  MCInst MI;
  MI.setOpcode(Opcode);

  if (DefRegister != X86::NoRegister)
    MI.addOperand(MCOperand::createReg(DefRegister));

  for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx,
            E = FaultingMI.operands_end();
       I != E; ++I)
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I))
      MI.addOperand(MaybeOperand.getValue());

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  OutStreamer->EmitInstruction(MI, getSubtargetInfo());
}

void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
                                     X86MCInstLower &MCIL) {
  bool Is64Bits = Subtarget->is64Bit();
  MCContext &Ctx = OutStreamer->getContext();
  MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__");
  const MCSymbolRefExpr *Op =
      MCSymbolRefExpr::create(fentry, MCSymbolRefExpr::VK_None, Ctx);

  EmitAndCountInstruction(
      MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32)
          .addExpr(Op));
}

void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
                                      X86MCInstLower &MCIL) {
  // PATCHABLE_OP minsize, opcode, operands

  unsigned MinSize = MI.getOperand(0).getImm();
  unsigned Opcode = MI.getOperand(1).getImm();

  MCInst MCI;
  MCI.setOpcode(Opcode);
  for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      MCI.addOperand(MaybeOperand.getValue());

  SmallString<256> Code;
  SmallVector<MCFixup, 4> Fixups;
  raw_svector_ostream VecOS(Code);
  CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo());

  if (Code.size() < MinSize) {
    if (MinSize == 2 && Opcode == X86::PUSH64r) {
      // This is an optimization that lets us get away without emitting a nop
      // in many cases.
      //
      // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %r9) takes
      // two bytes too, so the check on MinSize is important.
      MCI.setOpcode(X86::PUSH64rmr);
    } else {
      unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(),
                                 getSubtargetInfo());
      assert(NopSize == MinSize && "Could not implement MinSize!");
      (void)NopSize;
    }
  }

  OutStreamer->EmitInstruction(MCI, getSubtargetInfo());
}

// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
  SM.recordStackMap(MI);
  unsigned NumShadowBytes = MI.getOperand(1).getImm();
  SMShadowTracker.reset(NumShadowBytes);
}

// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");

  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO = opers.getCallTarget();

  // Check for null target. If target is non-null (i.e. is non-zero or is
  // symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      /// FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.");
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp = MCIL.LowerSymbolOperand(CalleeMO,
                                           MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    Register ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    // FIXME: Add retpoline support and remove this.
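    // The indirect CALL64r through the scratch register is incompatible with
    // retpoline's no-indirect-branches requirement, hence the hard error.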
    if (Subtarget->useRetpolineIndirectCalls())
      report_fatal_error(
          "Lowering patchpoint with retpoline not yet implemented.");
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}

void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
                                              X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "XRay custom events only supports X86-64");

  // We want to emit the following pattern, which follows the x86 calling
  // convention to prepare for the trampoline call to be patched in.
  //
  //   .p2align 1, ...
  // .Lxray_event_sled_N:
  //   jmp +N                        // jump across the instrumentation sled
  //   ...                           // set up arguments in register
  //   callq __xray_CustomEvent@plt  // force dependency to symbol
  //   ...
  //   <jump here>
  //
  // After patching, it would look something like:
  //
  //   nopw (2-byte nop)
  //   ...
  //   callq __xrayCustomEvent  // already lowered
  //   ...
  //
  // ---
  // First we emit the label and the jump.
  auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
  OutStreamer->AddComment("# XRay Custom Event Log");
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBinaryData("\xeb\x0f");

  // The trampoline expects the two event arguments in %rdi and %rsi (the
  // first two SysV integer argument registers), so those are the only
  // registers we work with.
  unsigned DestRegs[] = {X86::RDI, X86::RSI};
  bool UsedMask[] = {false, false};
  // Filled out in loop.
  unsigned SrcRegs[] = {0, 0};

  // Then we put the operands in the %rdi and %rsi registers. We spill the
  // values in the registers before we clobber them, and mark them as used in
  // UsedMask. In case the arguments are already in the correct register, we
  // emit nops appropriately sized to keep the sled the same size in every
  // situation.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) {
      assert(Op->isReg() && "Only support arguments in registers");
      SrcRegs[I] = Op->getReg();
      if (SrcRegs[I] != DestRegs[I]) {
        UsedMask[I] = true;
        EmitAndCountInstruction(
            MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
      } else {
        EmitNops(*OutStreamer, 4, Subtarget->is64Bit(), getSubtargetInfo());
      }
    }

  // Now that the register values are stashed, mov arguments into place.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (SrcRegs[I] != DestRegs[I])
      EmitAndCountInstruction(
          MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));

  // We emit a hard dependency on the __xray_CustomEvent symbol, which is the
  // name of the trampoline to be implemented by the XRay runtime.
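  // Under PIC the call must be routed through the PLT, which is why the
  // operand gets the MO_PLT target flag when isPositionIndependent().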
  auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
  MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
  if (isPositionIndependent())
    TOp.setTargetFlags(X86II::MO_PLT);

  // Emit the call instruction.
  EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
                              .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));

  // Restore caller-saved and used registers.
  for (unsigned I = sizeof UsedMask; I-- > 0;)
    if (UsedMask[I])
      EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
    else
      EmitNops(*OutStreamer, 1, Subtarget->is64Bit(), getSubtargetInfo());

  OutStreamer->AddComment("xray custom event end.");

  // Record the sled version. Older versions of this sled were spelled
  // differently, so we let the runtime handle the different offsets we're
  // using.
  recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 1);
}

void X86AsmPrinter::LowerPATCHABLE_TYPED_EVENT_CALL(const MachineInstr &MI,
                                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "XRay typed events only supports X86-64");

  // We want to emit the following pattern, which follows the x86 calling
  // convention to prepare for the trampoline call to be patched in.
  //
  //   .p2align 1, ...
  // .Lxray_event_sled_N:
  //   jmp +N                       // jump across the instrumentation sled
  //   ...                          // set up arguments in register
  //   callq __xray_TypedEvent@plt  // force dependency to symbol
  //   ...
  //   <jump here>
  //
  // After patching, it would look something like:
  //
  //   nopw (2-byte nop)
  //   ...
  //   callq __xrayTypedEvent  // already lowered
  //   ...
  //
  // ---
  // First we emit the label and the jump.
  auto CurSled = OutContext.createTempSymbol("xray_typed_event_sled_", true);
  OutStreamer->AddComment("# XRay Typed Event Log");
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBinaryData("\xeb\x14");

  // The trampoline expects the three event arguments in %rdi, %rsi, and %rdx
  // (the SystemV argument registers). If the values already live there, no
  // translation is needed; otherwise we move them into place below.
  unsigned DestRegs[] = {X86::RDI, X86::RSI, X86::RDX};
  bool UsedMask[] = {false, false, false};

  // Will fill out src regs in the loop.
  unsigned SrcRegs[] = {0, 0, 0};

  // Then we put the operands in the SystemV registers. We spill the values in
  // the registers before we clobber them, and mark them as used in UsedMask.
  // In case the arguments are already in the correct register, we emit nops
  // appropriately sized to keep the sled the same size in every situation.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) {
      // TODO: Is register-only support adequate?
      assert(Op->isReg() && "Only supports arguments in registers");
      SrcRegs[I] = Op->getReg();
      if (SrcRegs[I] != DestRegs[I]) {
        UsedMask[I] = true;
        EmitAndCountInstruction(
            MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
      } else {
        EmitNops(*OutStreamer, 4, Subtarget->is64Bit(), getSubtargetInfo());
      }
    }

  // In the above loop we only stash all of the destination registers or emit
  // nops if the arguments are already in the right place. The actual moving
  // is postponed until after all the registers are stashed so nothing is
  // clobbered. We've already added nops to account for the size of mov and
  // push if the register is in the right place, so we only have to worry
  // about emitting movs.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (UsedMask[I])
      EmitAndCountInstruction(
          MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));

  // We emit a hard dependency on the __xray_TypedEvent symbol, which is the
  // name of the trampoline to be implemented by the XRay runtime.
  auto TSym = OutContext.getOrCreateSymbol("__xray_TypedEvent");
  MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
  if (isPositionIndependent())
    TOp.setTargetFlags(X86II::MO_PLT);

  // Emit the call instruction.
  EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
                              .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));

  // Restore caller-saved and used registers.
  for (unsigned I = sizeof UsedMask; I-- > 0;)
    if (UsedMask[I])
      EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
    else
      EmitNops(*OutStreamer, 1, Subtarget->is64Bit(), getSubtargetInfo());

  OutStreamer->AddComment("xray typed event end.");

  // Record the sled version.
  recordSled(CurSled, MI, SledKind::TYPED_EVENT, 0);
}

void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
                                                  X86MCInstLower &MCIL) {
  // We want to emit the following pattern:
  //
  //   .p2align 1, ...
  // .Lxray_sled_N:
  //   jmp .tmpN
  //   # 9 bytes worth of noops
  //
  // We need the 9 bytes because at runtime, we'd be patching over the full 11
  // bytes with the following pattern:
  //
  //   mov %r10, <function id, 32-bit>   // 6 bytes
  //   call <relative offset, 32-bits>   // 5 bytes
  //
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBytes("\xeb\x09");
  EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
  recordSled(CurSled, MI, SledKind::FUNCTION_ENTER);
}

void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
                                       X86MCInstLower &MCIL) {
  // Since PATCHABLE_RET takes the opcode of the return statement as an
  // argument, we use that to emit the correct form of the RET that we want.
  // i.e. when we see this:
  //
  //   PATCHABLE_RET X86::RET ...
  //
  // We should emit the RET followed by sleds.
  //
  //   .p2align 1, ...
void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
                                       X86MCInstLower &MCIL) {
  // Since PATCHABLE_RET takes the opcode of the return statement as an
  // argument, we use that to emit the correct form of the RET that we want.
  // i.e. when we see this:
  //
  //   PATCHABLE_RET X86::RET ...
  //
  // We should emit the RET followed by sleds.
  //
  //   .p2align 1, ...
  // .Lxray_sled_N:
  //   ret  # or equivalent instruction
  //   # 10 bytes worth of noops
  //
  // This just makes sure that the alignment for the next instruction is 2.
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);
  unsigned OpCode = MI.getOperand(0).getImm();
  MCInst Ret;
  Ret.setOpcode(OpCode);
  for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      Ret.addOperand(MaybeOperand.getValue());
  OutStreamer->EmitInstruction(Ret, getSubtargetInfo());
  EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo());
  recordSled(CurSled, MI, SledKind::FUNCTION_EXIT);
}

void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
                                             X86MCInstLower &MCIL) {
  // Like PATCHABLE_RET, we have the actual instruction in the operands to this
  // instruction so we lower that particular instruction and its operands.
  // Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how
  // we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to
  // the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual
  // tail call much like how we have it in PATCHABLE_RET.
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);
  auto Target = OutContext.createTempSymbol();

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBytes("\xeb\x09");
  EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
  OutStreamer->EmitLabel(Target);
  recordSled(CurSled, MI, SledKind::TAIL_CALL);

  unsigned OpCode = MI.getOperand(0).getImm();
  OpCode = convertTailJumpOpcode(OpCode);
  MCInst TC;
  TC.setOpcode(OpCode);

  // Before emitting the instruction, add a comment to indicate that this is
  // indeed a tail call.
  OutStreamer->AddComment("TAILCALL");
  for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      TC.addOperand(MaybeOperand.getValue());
  OutStreamer->EmitInstruction(TC, getSubtargetInfo());
}
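// Illustrative shape of the tail-call sled emitted above (AT&T syntax):
//
//     .p2align 1
//   .Lxray_sled_N:
//     jmp .Ltmp0        # eb 09: skip the nop sled
//     <9 bytes of nops>
//   .Ltmp0:
//     jmp callee        # TAILCALL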
// Returns the instruction preceding MBBI in the MachineFunction.
// If MBBI is the first instruction of the first basic block, returns null.
static MachineBasicBlock::const_iterator
PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) {
  const MachineBasicBlock *MBB = MBBI->getParent();
  while (MBBI == MBB->begin()) {
    if (MBB == &MBB->getParent()->front())
      return MachineBasicBlock::const_iterator();
    MBB = MBB->getPrevNode();
    MBBI = MBB->end();
  }
  --MBBI;
  return MBBI;
}

static const Constant *getConstantFromPool(const MachineInstr &MI,
                                           const MachineOperand &Op) {
  if (!Op.isCPI() || Op.getOffset() != 0)
    return nullptr;

  ArrayRef<MachineConstantPoolEntry> Constants =
      MI.getParent()->getParent()->getConstantPool()->getConstants();
  const MachineConstantPoolEntry &ConstantEntry = Constants[Op.getIndex()];

  // Bail if this is a machine constant pool entry; we won't be able to dig out
  // anything useful.
  if (ConstantEntry.isMachineConstantPoolEntry())
    return nullptr;

  const Constant *C = ConstantEntry.Val.ConstVal;
  assert((!C || ConstantEntry.getType() == C->getType()) &&
         "Expected a constant of the same type!");
  return C;
}
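// Typical use of getConstantFromPool (illustrative): for the vector loads
// handled later in this file, the memory reference starts at operand 1 and
// the constant-pool index travels in the displacement slot, so callers pass
// MI->getOperand(1 + X86::AddrDisp), i.e. operand 4.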
static std::string getShuffleComment(const MachineInstr *MI, unsigned SrcOp1Idx,
                                     unsigned SrcOp2Idx, ArrayRef<int> Mask) {
  std::string Comment;

  // Compute the name for a register. This is really goofy because we have
  // multiple instruction printers that could (in theory) use different
  // names. Fortunately most people use the ATT style (outside of Windows)
  // and they actually agree on register naming here. Ultimately, this is
  // a comment, and so it's OK if it isn't perfect.
  auto GetRegisterName = [](unsigned RegNum) -> StringRef {
    return X86ATTInstPrinter::getRegisterName(RegNum);
  };

  const MachineOperand &DstOp = MI->getOperand(0);
  const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx);
  const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx);

  StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
  StringRef Src1Name =
      SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
  StringRef Src2Name =
      SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";

  // One source operand, fix the mask to print all elements in one span.
  SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  raw_string_ostream CS(Comment);
  CS << DstName;

  // Handle AVX512 MASK/MASKZ write mask comments.
  // MASK: zmmX {%kY}
  // MASKZ: zmmX {%kY} {z}
  if (SrcOp1Idx > 1) {
    assert((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask");

    const MachineOperand &WriteMaskOp = MI->getOperand(SrcOp1Idx - 1);
    if (WriteMaskOp.isReg()) {
      CS << " {%" << GetRegisterName(WriteMaskOp.getReg()) << "}";

      if (SrcOp1Idx == 2) {
        CS << " {z}";
      }
    }
  }

  CS << " = ";

  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    if (i != 0)
      CS << ",";
    if (ShuffleMask[i] == SM_SentinelZero) {
      CS << "zero";
      continue;
    }

    // Otherwise, it must come from src1 or src2. Print the span of elements
    // that comes from this src.
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ? Src1Name : Src2Name) << '[';

    bool IsFirst = true;
    while (i != e && ShuffleMask[i] != SM_SentinelZero &&
           (ShuffleMask[i] < (int)e) == isSrc1) {
      if (!IsFirst)
        CS << ',';
      else
        IsFirst = false;
      if (ShuffleMask[i] == SM_SentinelUndef)
        CS << "u";
      else
        CS << ShuffleMask[i] % (int)e;
      ++i;
    }
    CS << ']';
    --i; // For loop increments element #.
  }
  CS.flush();

  return Comment;
}

static void printConstant(const APInt &Val, raw_ostream &CS) {
  if (Val.getBitWidth() <= 64) {
    CS << Val.getZExtValue();
  } else {
    // Print a multi-word constant as (w0,w1).
    CS << "(";
    for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
      if (i > 0)
        CS << ",";
      CS << Val.getRawData()[i];
    }
    CS << ")";
  }
}

static void printConstant(const APFloat &Flt, raw_ostream &CS) {
  SmallString<32> Str;
  // Force scientific notation to distinguish from integers.
  Flt.toString(Str, 0, 0);
  CS << Str;
}

static void printConstant(const Constant *COp, raw_ostream &CS) {
  if (isa<UndefValue>(COp)) {
    CS << "u";
  } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
    printConstant(CI->getValue(), CS);
  } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
    printConstant(CF->getValueAPF(), CS);
  } else {
    CS << "?";
  }
}
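// Sample strings these helpers produce (illustrative):
//   getShuffleComment:            "xmm0 = xmm1[0],zero,zero,xmm1[2]"
//   ...with an AVX512 writemask:  "zmm0 {%k1} {z} = zmm1[0,1,2,3]"
//   printConstant on i32 42:      "42"; on undef: "u"; on unknown: "?"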
void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) {
  assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
  assert(getSubtarget().isOSWindows() && "SEH_ instructions are Windows only");

  // Use the .cv_fpo directives if we're emitting CodeView on 32-bit x86.
  if (EmitFPOData) {
    X86TargetStreamer *XTS =
        static_cast<X86TargetStreamer *>(OutStreamer->getTargetStreamer());
    switch (MI->getOpcode()) {
    case X86::SEH_PushReg:
      XTS->emitFPOPushReg(MI->getOperand(0).getImm());
      break;
    case X86::SEH_StackAlloc:
      XTS->emitFPOStackAlloc(MI->getOperand(0).getImm());
      break;
    case X86::SEH_StackAlign:
      XTS->emitFPOStackAlign(MI->getOperand(0).getImm());
      break;
    case X86::SEH_SetFrame:
      assert(MI->getOperand(1).getImm() == 0 &&
             ".cv_fpo_setframe takes no offset");
      XTS->emitFPOSetFrame(MI->getOperand(0).getImm());
      break;
    case X86::SEH_EndPrologue:
      XTS->emitFPOEndPrologue();
      break;
    case X86::SEH_SaveReg:
    case X86::SEH_SaveXMM:
    case X86::SEH_PushFrame:
      llvm_unreachable("SEH_ directive incompatible with FPO");
      break;
    default:
      llvm_unreachable("expected SEH_ instruction");
    }
    return;
  }

  // Otherwise, use the .seh_ directives for all other Windows platforms.
  switch (MI->getOpcode()) {
  case X86::SEH_PushReg:
    OutStreamer->EmitWinCFIPushReg(MI->getOperand(0).getImm());
    break;

  case X86::SEH_SaveReg:
    OutStreamer->EmitWinCFISaveReg(MI->getOperand(0).getImm(),
                                   MI->getOperand(1).getImm());
    break;

  case X86::SEH_SaveXMM:
    OutStreamer->EmitWinCFISaveXMM(MI->getOperand(0).getImm(),
                                   MI->getOperand(1).getImm());
    break;

  case X86::SEH_StackAlloc:
    OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm());
    break;

  case X86::SEH_SetFrame:
    OutStreamer->EmitWinCFISetFrame(MI->getOperand(0).getImm(),
                                    MI->getOperand(1).getImm());
    break;

  case X86::SEH_PushFrame:
    OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm());
    break;

  case X86::SEH_EndPrologue:
    OutStreamer->EmitWinCFIEndProlog();
    break;

  default:
    llvm_unreachable("expected SEH_ instruction");
  }
}

static unsigned getRegisterWidth(const MCOperandInfo &Info) {
  if (Info.RegClass == X86::VR128RegClassID ||
      Info.RegClass == X86::VR128XRegClassID)
    return 128;
  if (Info.RegClass == X86::VR256RegClassID ||
      Info.RegClass == X86::VR256XRegClassID)
    return 256;
  if (Info.RegClass == X86::VR512RegClassID)
    return 512;
  llvm_unreachable("Unknown register class!");
}
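// getRegisterWidth feeds the shuffle-mask decoders below, which need to know
// how wide the destination vector is; the recurring call pattern is
// (illustrative):
//   unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);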
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  X86MCInstLower MCInstLowering(*MF, *this);
  const X86RegisterInfo *RI =
      MF->getSubtarget<X86Subtarget>().getRegisterInfo();

  // Add a comment about EVEX-to-VEX compression for AVX-512 instrs that
  // are compressed from EVEX encoding to VEX encoding.
  if (TM.Options.MCOptions.ShowMCEncoding) {
    if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX)
      OutStreamer->AddComment("EVEX TO VEX Compression ", false);
  }

  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
    llvm_unreachable("Should be handled target independently");

  // Emit nothing here but a comment if we can.
  case X86::Int_MemBarrier:
    OutStreamer->emitRawComment("MEMBARRIER");
    return;

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Lower these as normal, but add some comments.
    Register Reg = MI->getOperand(0).getReg();
    OutStreamer->AddComment(StringRef("eh_return, addr: %") +
                            X86ATTInstPrinter::getRegisterName(Reg));
    break;
  }
  case X86::CLEANUPRET: {
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("CLEANUPRET");
    break;
  }

  case X86::CATCHRET: {
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("CATCHRET");
    break;
  }

  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::TAILJMPd:
  case X86::TAILJMPd_CC:
  case X86::TAILJMPr64:
  case X86::TAILJMPm64:
  case X86::TAILJMPd64:
  case X86::TAILJMPd64_CC:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPm64_REX:
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("TAILCALL");
    break;

  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
    return LowerTlsAddr(MCInstLowering, *MI);

  // Loading/storing mask pairs requires two kmov operations. The second one
  // of these needs a 2-byte displacement relative to the specified address
  // (with a 32-bit spill size). Pairs of 1-bit masks up to 16-bit masks all
  // use the same spill size: they are all stored with MASKPAIR16STORE and
  // loaded with MASKPAIR16LOAD.
  //
  // The displacement value might wrap around in theory, thus the asserts in
  // both cases.
  case X86::MASKPAIR16LOAD: {
    int64_t Disp = MI->getOperand(1 + X86::AddrDisp).getImm();
    assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");
    Register Reg = MI->getOperand(0).getReg();
    Register Reg0 = RI->getSubReg(Reg, X86::sub_mask_0);
    Register Reg1 = RI->getSubReg(Reg, X86::sub_mask_1);

    // Load the first mask register.
    MCInstBuilder MIB = MCInstBuilder(X86::KMOVWkm);
    MIB.addReg(Reg0);
    for (int i = 0; i < X86::AddrNumOperands; ++i) {
      auto Op = MCInstLowering.LowerMachineOperand(MI, MI->getOperand(1 + i));
      MIB.addOperand(Op.getValue());
    }
    EmitAndCountInstruction(MIB);

    // Load the second mask register of the pair.
    MIB = MCInstBuilder(X86::KMOVWkm);
    MIB.addReg(Reg1);
    for (int i = 0; i < X86::AddrNumOperands; ++i) {
      if (i == X86::AddrDisp) {
        MIB.addImm(Disp + 2);
      } else {
        auto Op =
            MCInstLowering.LowerMachineOperand(MI, MI->getOperand(1 + i));
        MIB.addOperand(Op.getValue());
      }
    }
    EmitAndCountInstruction(MIB);
    return;
  }

  case X86::MASKPAIR16STORE: {
    int64_t Disp = MI->getOperand(X86::AddrDisp).getImm();
    assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");
    Register Reg = MI->getOperand(X86::AddrNumOperands).getReg();
    Register Reg0 = RI->getSubReg(Reg, X86::sub_mask_0);
    Register Reg1 = RI->getSubReg(Reg, X86::sub_mask_1);

    // Store the first mask register.
    MCInstBuilder MIB = MCInstBuilder(X86::KMOVWmk);
    for (int i = 0; i < X86::AddrNumOperands; ++i)
      MIB.addOperand(
          MCInstLowering.LowerMachineOperand(MI, MI->getOperand(i))
              .getValue());
    MIB.addReg(Reg0);
    EmitAndCountInstruction(MIB);

    // Store the second mask register of the pair.
    MIB = MCInstBuilder(X86::KMOVWmk);
    for (int i = 0; i < X86::AddrNumOperands; ++i) {
      if (i == X86::AddrDisp) {
        MIB.addImm(Disp + 2);
      } else {
        auto Op = MCInstLowering.LowerMachineOperand(MI, MI->getOperand(i));
        MIB.addOperand(Op.getValue());
      }
    }
    MIB.addReg(Reg1);
    EmitAndCountInstruction(MIB);
    return;
  }
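  // Illustrative expansion of the mask-pair pseudos above, for a spill slot
  // at -8(%rsp) and a pair whose halves map to %k0/%k1 (names hypothetical):
  //   kmovw -8(%rsp), %k0    # first 16-bit mask
  //   kmovw -6(%rsp), %k1    # second 16-bit mask, at Disp + 2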
  case X86::MOVPC32r: {
    // This is a pseudo op for a two instruction sequence with a label, which
    // looks like:
    //   call "L1$pb"
    // "L1$pb":
    //   popl %esi

    // Emit the call.
    MCSymbol *PICBase = MF->getPICBaseSymbol();
    // FIXME: We would like an efficient form for this, so we don't have to do
    // a lot of extra uniquing.
    EmitAndCountInstruction(
        MCInstBuilder(X86::CALLpcrel32)
            .addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));

    const X86FrameLowering *FrameLowering =
        MF->getSubtarget<X86Subtarget>().getFrameLowering();
    bool hasFP = FrameLowering->hasFP(*MF);

    // TODO: This is needed only if we require precise CFA.
    bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
                               !OutStreamer->getDwarfFrameInfos().back().End;

    int stackGrowth = -RI->getSlotSize();

    if (HasActiveDwarfFrame && !hasFP) {
      OutStreamer->EmitCFIAdjustCfaOffset(-stackGrowth);
    }

    // Emit the label.
    OutStreamer->EmitLabel(PICBase);

    // popl $reg
    EmitAndCountInstruction(
        MCInstBuilder(X86::POP32r).addReg(MI->getOperand(0).getReg()));

    if (HasActiveDwarfFrame && !hasFP) {
      OutStreamer->EmitCFIAdjustCfaOffset(stackGrowth);
    }
    return;
  }

  case X86::ADD32ri: {
    // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
    if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
      break;

    // Okay, we have something like:
    //   EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

    // For this, we want to print something like:
    //   MYGLOBAL + (. - PICBASE)
    // However, we can't generate a ".", so just emit a new label here and
    // refer to it.
    MCSymbol *DotSym = OutContext.createTempSymbol();
    OutStreamer->EmitLabel(DotSym);

    // Now that we have emitted the label, lower the complex operand
    // expression.
    MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

    const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
    const MCExpr *PICBase =
        MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
    DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);

    DotExpr = MCBinaryExpr::createAdd(
        MCSymbolRefExpr::create(OpSym, OutContext), DotExpr, OutContext);

    EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
                                .addReg(MI->getOperand(0).getReg())
                                .addReg(MI->getOperand(1).getReg())
                                .addExpr(DotExpr));
    return;
  }
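  // Worked example of the expression built above (illustrative; the label
  // names are whatever the context assigns). For @_GLOBAL_OFFSET_TABLE_ this
  // typically prints as:
  //   .Ltmp0:
  //     addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %eax
  // so at run time the register ends up holding an absolute address.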
  case TargetOpcode::STATEPOINT:
    return LowerSTATEPOINT(*MI, MCInstLowering);

  case TargetOpcode::FAULTING_OP:
    return LowerFAULTING_OP(*MI, MCInstLowering);

  case TargetOpcode::FENTRY_CALL:
    return LowerFENTRY_CALL(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_OP:
    return LowerPATCHABLE_OP(*MI, MCInstLowering);

  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(*MI);

  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
    return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_RET:
    return LowerPATCHABLE_RET(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_TAIL_CALL:
    return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_EVENT_CALL:
    return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
    return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering);

  case X86::MORESTACK_RET:
    EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
    return;

  case X86::MORESTACK_RET_RESTORE_R10:
    // Return, then restore R10.
    EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
    return;

  case X86::SEH_PushReg:
  case X86::SEH_SaveReg:
  case X86::SEH_SaveXMM:
  case X86::SEH_StackAlloc:
  case X86::SEH_StackAlign:
  case X86::SEH_SetFrame:
  case X86::SEH_PushFrame:
  case X86::SEH_EndPrologue:
    EmitSEHInstruction(MI);
    return;

  case X86::SEH_Epilogue: {
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    MachineBasicBlock::const_iterator MBBI(MI);
    // Check if preceded by a call and emit nop if so.
    for (MBBI = PrevCrossBBInst(MBBI);
         MBBI != MachineBasicBlock::const_iterator();
         MBBI = PrevCrossBBInst(MBBI)) {
      // Conservatively assume that pseudo instructions don't emit code and
      // keep looking for a call. We may emit an unnecessary nop in some cases.
      if (!MBBI->isPseudo()) {
        if (MBBI->isCall())
          EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
        break;
      }
    }
    return;
  }

  // Lower PSHUFB and VPERMILP normally but add a comment if we can find
  // a constant shuffle mask. We won't be able to do this at the MC layer
  // because the mask isn't an immediate.
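  // In the two cases below, SrcIdx/MaskIdx follow from the operand layout:
  // the memory reference (whose displacement, X86::AddrDisp == 3, carries the
  // constant-pool index) begins right after the register operands, so:
  //   plain rm:  dst, src              -> SrcIdx = 1, MaskIdx = 2 + 3 = 5
  //   rmkz:      dst, mask, src        -> SrcIdx = 2, MaskIdx = 3 + 3 = 6
  //   rmk:       dst, tied, mask, src  -> SrcIdx = 3, MaskIdx = 4 + 3 = 7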
  case X86::PSHUFBrm:
  case X86::VPSHUFBrm:
  case X86::VPSHUFBYrm:
  case X86::VPSHUFBZ128rm:
  case X86::VPSHUFBZ128rmk:
  case X86::VPSHUFBZ128rmkz:
  case X86::VPSHUFBZ256rm:
  case X86::VPSHUFBZ256rmk:
  case X86::VPSHUFBZ256rmkz:
  case X86::VPSHUFBZrm:
  case X86::VPSHUFBZrmk:
  case X86::VPSHUFBZrmkz: {
    if (!OutStreamer->isVerboseAsm())
      break;
    unsigned SrcIdx, MaskIdx;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::PSHUFBrm:
    case X86::VPSHUFBrm:
    case X86::VPSHUFBYrm:
    case X86::VPSHUFBZ128rm:
    case X86::VPSHUFBZ256rm:
    case X86::VPSHUFBZrm:
      SrcIdx = 1; MaskIdx = 5; break;
    case X86::VPSHUFBZ128rmkz:
    case X86::VPSHUFBZ256rmkz:
    case X86::VPSHUFBZrmkz:
      SrcIdx = 2; MaskIdx = 6; break;
    case X86::VPSHUFBZ128rmk:
    case X86::VPSHUFBZ256rmk:
    case X86::VPSHUFBZrmk:
      SrcIdx = 3; MaskIdx = 7; break;
    }

    assert(MI->getNumOperands() >= 6 &&
           "We should always have at least 6 operands!");

    const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
      SmallVector<int, 64> Mask;
      DecodePSHUFBMask(C, Width, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
    }
    break;
  }

  case X86::VPERMILPSrm:
  case X86::VPERMILPSYrm:
  case X86::VPERMILPSZ128rm:
  case X86::VPERMILPSZ128rmk:
  case X86::VPERMILPSZ128rmkz:
  case X86::VPERMILPSZ256rm:
  case X86::VPERMILPSZ256rmk:
  case X86::VPERMILPSZ256rmkz:
  case X86::VPERMILPSZrm:
  case X86::VPERMILPSZrmk:
  case X86::VPERMILPSZrmkz:
  case X86::VPERMILPDrm:
  case X86::VPERMILPDYrm:
  case X86::VPERMILPDZ128rm:
  case X86::VPERMILPDZ128rmk:
  case X86::VPERMILPDZ128rmkz:
  case X86::VPERMILPDZ256rm:
  case X86::VPERMILPDZ256rmk:
  case X86::VPERMILPDZ256rmkz:
  case X86::VPERMILPDZrm:
  case X86::VPERMILPDZrmk:
  case X86::VPERMILPDZrmkz: {
    if (!OutStreamer->isVerboseAsm())
      break;
    unsigned SrcIdx, MaskIdx;
    unsigned ElSize;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::VPERMILPSrm:
    case X86::VPERMILPSYrm:
    case X86::VPERMILPSZ128rm:
    case X86::VPERMILPSZ256rm:
    case X86::VPERMILPSZrm:
      SrcIdx = 1; MaskIdx = 5; ElSize = 32; break;
    case X86::VPERMILPSZ128rmkz:
    case X86::VPERMILPSZ256rmkz:
    case X86::VPERMILPSZrmkz:
      SrcIdx = 2; MaskIdx = 6; ElSize = 32; break;
    case X86::VPERMILPSZ128rmk:
    case X86::VPERMILPSZ256rmk:
    case X86::VPERMILPSZrmk:
      SrcIdx = 3; MaskIdx = 7; ElSize = 32; break;
    case X86::VPERMILPDrm:
    case X86::VPERMILPDYrm:
    case X86::VPERMILPDZ128rm:
    case X86::VPERMILPDZ256rm:
    case X86::VPERMILPDZrm:
      SrcIdx = 1; MaskIdx = 5; ElSize = 64; break;
    case X86::VPERMILPDZ128rmkz:
    case X86::VPERMILPDZ256rmkz:
    case X86::VPERMILPDZrmkz:
      SrcIdx = 2; MaskIdx = 6; ElSize = 64; break;
    case X86::VPERMILPDZ128rmk:
    case X86::VPERMILPDZ256rmk:
    case X86::VPERMILPDZrmk:
      SrcIdx = 3; MaskIdx = 7; ElSize = 64; break;
    }

    assert(MI->getNumOperands() >= 6 &&
           "We should always have at least 6 operands!");

    const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
      SmallVector<int, 16> Mask;
      DecodeVPERMILPMask(C, ElSize, Width, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
    }
    break;
  }
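  // Illustrative result: a VPERMILPSrm with the constant mask <3,2,1,0> gets
  // an asm comment along the lines of
  //   xmm0 = xmm1[3,2,1,0]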
  case X86::VPERMIL2PDrm:
  case X86::VPERMIL2PSrm:
  case X86::VPERMIL2PDYrm:
  case X86::VPERMIL2PSYrm: {
    if (!OutStreamer->isVerboseAsm())
      break;
    assert(MI->getNumOperands() >= 8 &&
           "We should always have at least 8 operands!");

    const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
    if (!CtrlOp.isImm())
      break;

    unsigned ElSize;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break;
    case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break;
    }

    const MachineOperand &MaskOp = MI->getOperand(6);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
      SmallVector<int, 16> Mask;
      DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
    }
    break;
  }

  case X86::VPPERMrrm: {
    if (!OutStreamer->isVerboseAsm())
      break;
    assert(MI->getNumOperands() >= 7 &&
           "We should always have at least 7 operands!");

    const MachineOperand &MaskOp = MI->getOperand(6);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
      SmallVector<int, 16> Mask;
      DecodeVPPERMMask(C, Width, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
    }
    break;
  }

  case X86::MMX_MOVQ64rm: {
    if (!OutStreamer->isVerboseAsm())
      break;
    if (MI->getNumOperands() <= 4)
      break;
    if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
      std::string Comment;
      raw_string_ostream CS(Comment);
      const MachineOperand &DstOp = MI->getOperand(0);
      CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
      if (auto *CF = dyn_cast<ConstantFP>(C)) {
        CS << "0x" << CF->getValueAPF().bitcastToAPInt().toString(16, false);
        OutStreamer->AddComment(CS.str());
      }
    }
    break;
  }

#define MOV_CASE(Prefix, Suffix)                                               \
  case X86::Prefix##MOVAPD##Suffix##rm:                                        \
  case X86::Prefix##MOVAPS##Suffix##rm:                                        \
  case X86::Prefix##MOVUPD##Suffix##rm:                                        \
  case X86::Prefix##MOVUPS##Suffix##rm:                                        \
  case X86::Prefix##MOVDQA##Suffix##rm:                                        \
  case X86::Prefix##MOVDQU##Suffix##rm:

#define MOV_AVX512_CASE(Suffix)                                                \
  case X86::VMOVDQA64##Suffix##rm:                                             \
  case X86::VMOVDQA32##Suffix##rm:                                             \
  case X86::VMOVDQU64##Suffix##rm:                                             \
  case X86::VMOVDQU32##Suffix##rm:                                             \
  case X86::VMOVDQU16##Suffix##rm:                                             \
  case X86::VMOVDQU8##Suffix##rm:                                              \
  case X86::VMOVAPS##Suffix##rm:                                               \
  case X86::VMOVAPD##Suffix##rm:                                               \
  case X86::VMOVUPS##Suffix##rm:                                               \
  case X86::VMOVUPD##Suffix##rm:

#define CASE_ALL_MOV_RM()                                                      \
  MOV_CASE(, )   /* SSE */                                                     \
  MOV_CASE(V, )  /* AVX-128 */                                                 \
  MOV_CASE(V, Y) /* AVX-256 */                                                 \
  MOV_AVX512_CASE(Z)                                                           \
  MOV_AVX512_CASE(Z256)                                                        \
  MOV_AVX512_CASE(Z128)

    // For loads from a constant pool to a vector register, print the constant
    // loaded.
    CASE_ALL_MOV_RM()
  case X86::VBROADCASTF128:
  case X86::VBROADCASTI128:
  case X86::VBROADCASTF32X4Z256rm:
  case X86::VBROADCASTF32X4rm:
  case X86::VBROADCASTF32X8rm:
  case X86::VBROADCASTF64X2Z128rm:
  case X86::VBROADCASTF64X2rm:
  case X86::VBROADCASTF64X4rm:
  case X86::VBROADCASTI32X4Z256rm:
  case X86::VBROADCASTI32X4rm:
  case X86::VBROADCASTI32X8rm:
  case X86::VBROADCASTI64X2Z128rm:
  case X86::VBROADCASTI64X2rm:
  case X86::VBROADCASTI64X4rm:
    if (!OutStreamer->isVerboseAsm())
      break;
    if (MI->getNumOperands() <= 4)
      break;
    if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
      int NumLanes = 1;
      // Override NumLanes for the broadcast instructions.
      switch (MI->getOpcode()) {
      case X86::VBROADCASTF128:        NumLanes = 2; break;
      case X86::VBROADCASTI128:        NumLanes = 2; break;
      case X86::VBROADCASTF32X4Z256rm: NumLanes = 2; break;
      case X86::VBROADCASTF32X4rm:     NumLanes = 4; break;
      case X86::VBROADCASTF32X8rm:     NumLanes = 2; break;
      case X86::VBROADCASTF64X2Z128rm: NumLanes = 2; break;
      case X86::VBROADCASTF64X2rm:     NumLanes = 4; break;
      case X86::VBROADCASTF64X4rm:     NumLanes = 2; break;
      case X86::VBROADCASTI32X4Z256rm: NumLanes = 2; break;
      case X86::VBROADCASTI32X4rm:     NumLanes = 4; break;
      case X86::VBROADCASTI32X8rm:     NumLanes = 2; break;
      case X86::VBROADCASTI64X2Z128rm: NumLanes = 2; break;
      case X86::VBROADCASTI64X2rm:     NumLanes = 4; break;
      case X86::VBROADCASTI64X4rm:     NumLanes = 2; break;
      }

      std::string Comment;
      raw_string_ostream CS(Comment);
      const MachineOperand &DstOp = MI->getOperand(0);
      CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
      if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
        CS << "[";
        for (int l = 0; l != NumLanes; ++l) {
          for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements;
               ++i) {
            if (i != 0 || l != 0)
              CS << ",";
            if (CDS->getElementType()->isIntegerTy())
              printConstant(CDS->getElementAsAPInt(i), CS);
            else if (CDS->getElementType()->isHalfTy() ||
                     CDS->getElementType()->isFloatTy() ||
                     CDS->getElementType()->isDoubleTy())
              printConstant(CDS->getElementAsAPFloat(i), CS);
            else
              CS << "?";
          }
        }
        CS << "]";
        OutStreamer->AddComment(CS.str());
      } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
        CS << "<";
        for (int l = 0; l != NumLanes; ++l) {
          for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands;
               ++i) {
            if (i != 0 || l != 0)
              CS << ",";
            printConstant(CV->getOperand(i), CS);
          }
        }
        CS << ">";
        OutStreamer->AddComment(CS.str());
      }
    }
    break;
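  // NumLanes above handles the tuple broadcasts: e.g. VBROADCASTF128 loads a
  // 128-bit constant and repeats it in both lanes of a 256-bit register, so
  // the comment prints the constant's elements twice (illustrative):
  //   ymm0 = [1,2,3,4,1,2,3,4]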
  case X86::MOVDDUPrm:
  case X86::VMOVDDUPrm:
  case X86::VMOVDDUPZ128rm:
  case X86::VBROADCASTSSrm:
  case X86::VBROADCASTSSYrm:
  case X86::VBROADCASTSSZ128m:
  case X86::VBROADCASTSSZ256m:
  case X86::VBROADCASTSSZm:
  case X86::VBROADCASTSDYrm:
  case X86::VBROADCASTSDZ256m:
  case X86::VBROADCASTSDZm:
  case X86::VPBROADCASTBrm:
  case X86::VPBROADCASTBYrm:
  case X86::VPBROADCASTBZ128m:
  case X86::VPBROADCASTBZ256m:
  case X86::VPBROADCASTBZm:
  case X86::VPBROADCASTDrm:
  case X86::VPBROADCASTDYrm:
  case X86::VPBROADCASTDZ128m:
  case X86::VPBROADCASTDZ256m:
  case X86::VPBROADCASTDZm:
  case X86::VPBROADCASTQrm:
  case X86::VPBROADCASTQYrm:
  case X86::VPBROADCASTQZ128m:
  case X86::VPBROADCASTQZ256m:
  case X86::VPBROADCASTQZm:
  case X86::VPBROADCASTWrm:
  case X86::VPBROADCASTWYrm:
  case X86::VPBROADCASTWZ128m:
  case X86::VPBROADCASTWZ256m:
  case X86::VPBROADCASTWZm:
    if (!OutStreamer->isVerboseAsm())
      break;
    if (MI->getNumOperands() <= 4)
      break;
    if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
      int NumElts;
      switch (MI->getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::MOVDDUPrm:          NumElts = 2;  break;
      case X86::VMOVDDUPrm:         NumElts = 2;  break;
      case X86::VMOVDDUPZ128rm:     NumElts = 2;  break;
      case X86::VBROADCASTSSrm:     NumElts = 4;  break;
      case X86::VBROADCASTSSYrm:    NumElts = 8;  break;
      case X86::VBROADCASTSSZ128m:  NumElts = 4;  break;
      case X86::VBROADCASTSSZ256m:  NumElts = 8;  break;
      case X86::VBROADCASTSSZm:     NumElts = 16; break;
      case X86::VBROADCASTSDYrm:    NumElts = 4;  break;
      case X86::VBROADCASTSDZ256m:  NumElts = 4;  break;
      case X86::VBROADCASTSDZm:     NumElts = 8;  break;
      case X86::VPBROADCASTBrm:     NumElts = 16; break;
      case X86::VPBROADCASTBYrm:    NumElts = 32; break;
      case X86::VPBROADCASTBZ128m:  NumElts = 16; break;
      case X86::VPBROADCASTBZ256m:  NumElts = 32; break;
      case X86::VPBROADCASTBZm:     NumElts = 64; break;
      case X86::VPBROADCASTDrm:     NumElts = 4;  break;
      case X86::VPBROADCASTDYrm:    NumElts = 8;  break;
      case X86::VPBROADCASTDZ128m:  NumElts = 4;  break;
      case X86::VPBROADCASTDZ256m:  NumElts = 8;  break;
      case X86::VPBROADCASTDZm:     NumElts = 16; break;
      case X86::VPBROADCASTQrm:     NumElts = 2;  break;
      case X86::VPBROADCASTQYrm:    NumElts = 4;  break;
      case X86::VPBROADCASTQZ128m:  NumElts = 2;  break;
      case X86::VPBROADCASTQZ256m:  NumElts = 4;  break;
      case X86::VPBROADCASTQZm:     NumElts = 8;  break;
      case X86::VPBROADCASTWrm:     NumElts = 8;  break;
      case X86::VPBROADCASTWYrm:    NumElts = 16; break;
      case X86::VPBROADCASTWZ128m:  NumElts = 8;  break;
      case X86::VPBROADCASTWZ256m:  NumElts = 16; break;
      case X86::VPBROADCASTWZm:     NumElts = 32; break;
      }

      std::string Comment;
      raw_string_ostream CS(Comment);
      const MachineOperand &DstOp = MI->getOperand(0);
      CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
      CS << "[";
      for (int i = 0; i != NumElts; ++i) {
        if (i != 0)
          CS << ",";
        printConstant(C, CS);
      }
      CS << "]";
      OutStreamer->AddComment(CS.str());
    }
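    // Illustrative element-broadcast comment: a VPBROADCASTDrm of the
    // constant i32 42 would be annotated as
    //   xmm0 = [42,42,42,42]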
  }

  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);

  // Stackmap shadows cannot include branch targets, so we can count the bytes
  // in a call towards the shadow, but must ensure that no thread returns into
  // the stackmap shadow. The only way to achieve this is if the call is at the
  // end of the shadow.
  if (MI->isCall()) {
    // Count the size of the call towards the shadow.
    SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get());
    // Then flush the shadow so that we fill with nops before the call, not
    // after it.
    SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
    // Then emit the call.
    OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo());
    return;
  }

  EmitAndCountInstruction(TmpInst);
}