//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86ATTInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86InstComments.h"
#include "MCTargetDesc/X86TargetStreamer.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86AsmPrinter.h"
#include "X86RegisterInfo.h"
#include "X86ShuffleDecodeConstantPool.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Target/TargetLoweringObjectFile.h"

using namespace llvm;

namespace {

/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
class X86MCInstLower {
  MCContext &Ctx;
  const MachineFunction &MF;
  const TargetMachine &TM;
  const MCAsmInfo &MAI;
  X86AsmPrinter &AsmPrinter;

public:
  X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);

  Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
                                          const MachineOperand &MO) const;
  void Lower(const MachineInstr *MI, MCInst &OutMI) const;

  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;

private:
  MachineModuleInfoMachO &getMachOMMI() const;
};

} // end anonymous namespace

// Emit a minimal sequence of nops spanning NumBytes bytes.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI);

void X86AsmPrinter::StackMapShadowTracker::count(MCInst &Inst,
                                                 const MCSubtargetInfo &STI,
                                                 MCCodeEmitter *CodeEmitter) {
  if (InShadow) {
    SmallString<256> Code;
    SmallVector<MCFixup, 4> Fixups;
    raw_svector_ostream VecOS(Code);
    CodeEmitter->encodeInstruction(Inst, VecOS, Fixups, STI);
    CurrentShadowSize += Code.size();
    if (CurrentShadowSize >= RequiredShadowSize)
      InShadow = false; // The shadow is big enough. Stop counting.
  }
}

void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
    MCStreamer &OutStreamer, const MCSubtargetInfo &STI) {
  if (InShadow && CurrentShadowSize < RequiredShadowSize) {
    InShadow = false;
    EmitNops(OutStreamer, RequiredShadowSize - CurrentShadowSize,
             MF->getSubtarget<X86Subtarget>().is64Bit(), STI);
  }
}

void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
  OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
  SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get());
}

X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
    : Ctx(mf.getContext()), MF(mf), TM(mf.getTarget()), MAI(*TM.getMCAsmInfo()),
      AsmPrinter(asmprinter) {}

MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}

/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
MCSymbol *X86MCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const {
  const DataLayout &DL = MF.getDataLayout();
  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
         "Isn't a symbol reference");

  MCSymbol *Sym = nullptr;
  SmallString<128> Name;
  StringRef Suffix;

  switch (MO.getTargetFlags()) {
  case X86II::MO_DLLIMPORT:
    // Handle dllimport linkage.
    Name += "__imp_";
    break;
  case X86II::MO_COFFSTUB:
    Name += ".refptr.";
    break;
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Suffix = "$non_lazy_ptr";
    break;
  }

  if (!Suffix.empty())
    Name += DL.getPrivateGlobalPrefix();

  if (MO.isGlobal()) {
    const GlobalValue *GV = MO.getGlobal();
    AsmPrinter.getNameWithPrefix(Name, GV);
  } else if (MO.isSymbol()) {
    Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL);
  } else if (MO.isMBB()) {
    assert(Suffix.empty());
    Sym = MO.getMBB()->getSymbol();
  }

  Name += Suffix;
  if (!Sym)
    Sym = Ctx.getOrCreateSymbol(Name);

  // If the target flags on the operand change the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default:
    break;
  case X86II::MO_COFFSTUB: {
    MachineModuleInfoCOFF &MMICOFF =
        MF.getMMI().getObjFileInfo<MachineModuleInfoCOFF>();
    MachineModuleInfoImpl::StubValueTy &StubSym = MMICOFF.getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym = MachineModuleInfoImpl::StubValueTy(
          AsmPrinter.getSymbol(MO.getGlobal()), true);
    }
    break;
  }
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    MachineModuleInfoImpl::StubValueTy &StubSym =
        getMachOMMI().getGVStubEntry(Sym);
    if (!StubSym.getPointer()) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym = MachineModuleInfoImpl::StubValueTy(
          AsmPrinter.getSymbol(MO.getGlobal()),
          !MO.getGlobal()->hasInternalLinkage());
    }
    break;
  }
  }

  return Sym;
}

MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
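  // For example, a reference to a global @x carrying the MO_GOTPCREL flag
  // lowers to the expression "x@GOTPCREL" below, with any operand offset
  // folded in as an add (e.g. "x@GOTPCREL+4"); the exact rendering depends on
  // the MCAsmInfo in use.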
  const MCExpr *Expr = nullptr;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  switch (MO.getTargetFlags()) {
  default:
    llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG: // No flag.
  // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
  case X86II::MO_COFFSTUB:
    break;

  case X86II::MO_TLVP:
    RefKind = MCSymbolRefExpr::VK_TLVP;
    break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(
        Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx);
    break;
  case X86II::MO_SECREL:
    RefKind = MCSymbolRefExpr::VK_SECREL;
    break;
  case X86II::MO_TLSGD:
    RefKind = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86II::MO_TLSLD:
    RefKind = MCSymbolRefExpr::VK_TLSLD;
    break;
  case X86II::MO_TLSLDM:
    RefKind = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86II::MO_GOTTPOFF:
    RefKind = MCSymbolRefExpr::VK_GOTTPOFF;
    break;
  case X86II::MO_INDNTPOFF:
    RefKind = MCSymbolRefExpr::VK_INDNTPOFF;
    break;
  case X86II::MO_TPOFF:
    RefKind = MCSymbolRefExpr::VK_TPOFF;
    break;
  case X86II::MO_DTPOFF:
    RefKind = MCSymbolRefExpr::VK_DTPOFF;
    break;
  case X86II::MO_NTPOFF:
    RefKind = MCSymbolRefExpr::VK_NTPOFF;
    break;
  case X86II::MO_GOTNTPOFF:
    RefKind = MCSymbolRefExpr::VK_GOTNTPOFF;
    break;
  case X86II::MO_GOTPCREL:
    RefKind = MCSymbolRefExpr::VK_GOTPCREL;
    break;
  case X86II::MO_GOT:
    RefKind = MCSymbolRefExpr::VK_GOT;
    break;
  case X86II::MO_GOTOFF:
    RefKind = MCSymbolRefExpr::VK_GOTOFF;
    break;
  case X86II::MO_PLT:
    RefKind = MCSymbolRefExpr::VK_PLT;
    break;
  case X86II::MO_ABS8:
    RefKind = MCSymbolRefExpr::VK_X86_ABS8;
    break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::createSub(
        Expr, MCSymbolRefExpr::create(MF.getPICBaseSymbol(), Ctx), Ctx);
    if (MO.isJTI()) {
      assert(MAI.doesSetDirectiveSuppressReloc());
      // If the .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section, so we restrict it to jump table references.
      MCSymbol *Label = Ctx.createTempSymbol();
      AsmPrinter.OutStreamer->EmitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::create(Label, Ctx);
    }
    break;
  }

  if (!Expr)
    Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx);

  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset())
    Expr = MCBinaryExpr::createAdd(
        Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
  return MCOperand::createExpr(Expr);
}

/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions with
/// a short fixed-register form.
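/// For example, "addl $42, %eax" selected as ADD32ri encodes as 81 C0 2A 00
/// 00 00 (6 bytes), while the %eax-specific ADD32i32 form encodes as
/// 05 2A 00 00 00 (5 bytes), saving one byte.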
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
  unsigned ImmOp = Inst.getNumOperands() - 1;
  assert(Inst.getOperand(0).isReg() &&
         (Inst.getOperand(ImmOp).isImm() || Inst.getOperand(ImmOp).isExpr()) &&
         ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
           Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
          Inst.getNumOperands() == 2) &&
         "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(0).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(ImmOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}

/// If a movsx instruction has a shorter encoding for the used register,
/// simplify the instruction to use it instead.
static void SimplifyMOVSX(MCInst &Inst) {
  unsigned NewOpcode = 0;
  unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg();
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction!");
  case X86::MOVSX16rr8: // movsbw %al, %ax   --> cbtw
    if (Op0 == X86::AX && Op1 == X86::AL)
      NewOpcode = X86::CBW;
    break;
  case X86::MOVSX32rr16: // movswl %ax, %eax  --> cwtl
    if (Op0 == X86::EAX && Op1 == X86::AX)
      NewOpcode = X86::CWDE;
    break;
  case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq
    if (Op0 == X86::RAX && Op1 == X86::EAX)
      NewOpcode = X86::CDQE;
    break;
  }

  if (NewOpcode != 0) {
    Inst = MCInst();
    Inst.setOpcode(NewOpcode);
  }
}

/// Simplify things like MOV32rm to MOV32o32a.
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (Printer.getSubtarget().is64Bit())
    return;

  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  assert(
      Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
      Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&
      Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&
      Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&
      Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&
      (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) &&
      "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(RegOp).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
  bool Absolute = true;
  if (Inst.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }

  if (Absolute &&
      (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
       Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
       Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(AddrOp);
  MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
  Inst.addOperand(Seg);
}

static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
  return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
}

Optional<MCOperand>
X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
                                    const MachineOperand &MO) const {
  switch (MO.getType()) {
  default:
    MI->print(errs());
    llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    // Ignore all implicit register operands.
    if (MO.isImplicit())
      return None;
    return MCOperand::createReg(MO.getReg());
  case MachineOperand::MO_Immediate:
    return MCOperand::createImm(MO.getImm());
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_ExternalSymbol:
    return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
  case MachineOperand::MO_MCSymbol:
    return LowerSymbolOperand(MO, MO.getMCSymbol());
  case MachineOperand::MO_JumpTableIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
  case MachineOperand::MO_ConstantPoolIndex:
    return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
  case MachineOperand::MO_BlockAddress:
    return LowerSymbolOperand(
        MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
  case MachineOperand::MO_RegisterMask:
    // Ignore call clobbers.
    return None;
  }
}

void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (const MachineOperand &MO : MI->operands())
    if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
      OutMI.addOperand(MaybeMCOp.getValue());

  // Handle a few special cases to eliminate operand modifiers.
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1 + X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;

  // Commute operands to get a smaller encoding by using VEX.R instead of
  // VEX.B if one of the registers is extended, but the other isn't.
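  // For instance, "vmovaps %xmm8, %xmm0" would need VEX.B to encode the
  // extended source register, which forces the 3-byte VEX prefix; the _REV
  // form swaps which operand lives in the ModRM.reg field so only VEX.R is
  // needed, letting the encoder use the 2-byte VEX prefix instead.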
  case X86::VMOVZPQILo2PQIrr:
  case X86::VMOVAPDrr:
  case X86::VMOVAPDYrr:
  case X86::VMOVAPSrr:
  case X86::VMOVAPSYrr:
  case X86::VMOVDQArr:
  case X86::VMOVDQAYrr:
  case X86::VMOVDQUrr:
  case X86::VMOVDQUYrr:
  case X86::VMOVUPDrr:
  case X86::VMOVUPDYrr:
  case X86::VMOVUPSrr:
  case X86::VMOVUPSYrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(1).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr;   break;
      case X86::VMOVAPDrr:        NewOpc = X86::VMOVAPDrr_REV;  break;
      case X86::VMOVAPDYrr:       NewOpc = X86::VMOVAPDYrr_REV; break;
      case X86::VMOVAPSrr:        NewOpc = X86::VMOVAPSrr_REV;  break;
      case X86::VMOVAPSYrr:       NewOpc = X86::VMOVAPSYrr_REV; break;
      case X86::VMOVDQArr:        NewOpc = X86::VMOVDQArr_REV;  break;
      case X86::VMOVDQAYrr:       NewOpc = X86::VMOVDQAYrr_REV; break;
      case X86::VMOVDQUrr:        NewOpc = X86::VMOVDQUrr_REV;  break;
      case X86::VMOVDQUYrr:       NewOpc = X86::VMOVDQUYrr_REV; break;
      case X86::VMOVUPDrr:        NewOpc = X86::VMOVUPDrr_REV;  break;
      case X86::VMOVUPDYrr:       NewOpc = X86::VMOVUPDYrr_REV; break;
      case X86::VMOVUPSrr:        NewOpc = X86::VMOVUPSrr_REV;  break;
      case X86::VMOVUPSYrr:       NewOpc = X86::VMOVUPSYrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }
  case X86::VMOVSDrr:
  case X86::VMOVSSrr: {
    if (!X86II::isX86_64ExtendedReg(OutMI.getOperand(0).getReg()) &&
        X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) {
      unsigned NewOpc;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::VMOVSDrr: NewOpc = X86::VMOVSDrr_REV; break;
      case X86::VMOVSSrr: NewOpc = X86::VMOVSSrr_REV; break;
      }
      OutMI.setOpcode(NewOpc);
    }
    break;
  }

  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have register
  // inputs modeled as normal uses instead of implicit uses. As such, truncate
  // off all but the first operand (the callee). FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::TAILJMPr64_REX:
  case X86::CALL64r:
  case X86::CALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CLEANUPRET: {
    // Replace CLEANUPRET with the appropriate RET.
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(AsmPrinter.getSubtarget()));
    break;
  }

  case X86::CATCHRET: {
    // Replace CATCHRET with the appropriate RET.
    const X86Subtarget &Subtarget = AsmPrinter.getSubtarget();
    unsigned ReturnReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    OutMI = MCInst();
    OutMI.setOpcode(getRetOpcode(Subtarget));
    OutMI.addOperand(MCOperand::createReg(ReturnReg));
    break;
  }

  // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump
  // instruction.
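  // For example, TAILJMPd64 lowers to a plain JMP_1 ("jmp target"); the
  // "TAILCALL" annotation seen next to it in assembly output is added
  // separately in X86AsmPrinter::EmitInstruction.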
  {
    unsigned Opcode;
  case X86::TAILJMPr:
    Opcode = X86::JMP32r;
    goto SetTailJmpOpcode;
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    Opcode = X86::JMP_1;
    goto SetTailJmpOpcode;

  SetTailJmpOpcode:
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::TAILJMPd_CC:
  case X86::TAILJMPd64_CC: {
    MCOperand Saved = OutMI.getOperand(0);
    MCOperand Saved2 = OutMI.getOperand(1);
    OutMI = MCInst();
    OutMI.setOpcode(X86::JCC_1);
    OutMI.addOperand(Saved);
    OutMI.addOperand(Saved2);
    break;
  }

  case X86::DEC16r:
  case X86::DEC32r:
  case X86::INC16r:
  case X86::INC32r:
    // If we aren't in 64-bit mode we can use the 1-byte inc/dec instructions.
    if (!AsmPrinter.getSubtarget().is64Bit()) {
      unsigned Opcode;
      switch (OutMI.getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::DEC16r: Opcode = X86::DEC16r_alt; break;
      case X86::DEC32r: Opcode = X86::DEC32r_alt; break;
      case X86::INC16r: Opcode = X86::INC16r_alt; break;
      case X86::INC32r: Opcode = X86::INC32r_alt; break;
      }
      OutMI.setOpcode(Opcode);
    }
    break;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form. Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:
  case X86::MOV16mr:
  case X86::MOV16rm:
  case X86::MOV32mr:
  case X86::MOV32rm: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::MOV8mr_NOREX:
    case X86::MOV8mr:  NewOpc = X86::MOV8o32a;  break;
    case X86::MOV8rm_NOREX:
    case X86::MOV8rm:  NewOpc = X86::MOV8ao32;  break;
    case X86::MOV16mr: NewOpc = X86::MOV16o32a; break;
    case X86::MOV16rm: NewOpc = X86::MOV16ao32; break;
    case X86::MOV32mr: NewOpc = X86::MOV32o32a; break;
    case X86::MOV32rm: NewOpc = X86::MOV32ao32; break;
    }
    SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
    break;
  }

  case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
  case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
  case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
  case X86::CMP8ri: case X86::CMP16ri: case X86::CMP32ri: case X86::CMP64ri32:
  case X86::OR8ri:  case X86::OR16ri:  case X86::OR32ri:  case X86::OR64ri32:
  case X86::SBB8ri: case X86::SBB16ri: case X86::SBB32ri: case X86::SBB64ri32:
  case X86::SUB8ri: case X86::SUB16ri: case X86::SUB32ri: case X86::SUB64ri32:
  case X86::TEST8ri:case X86::TEST16ri:case X86::TEST32ri:case X86::TEST64ri32:
  case X86::XOR8ri: case X86::XOR16ri: case X86::XOR32ri: case X86::XOR64ri32: {
    unsigned NewOpc;
    switch (OutMI.getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::ADC8ri:    NewOpc = X86::ADC8i8;    break;
    case X86::ADC16ri:   NewOpc = X86::ADC16i16;  break;
    case X86::ADC32ri:   NewOpc = X86::ADC32i32;  break;
    case X86::ADC64ri32: NewOpc = X86::ADC64i32;  break;
    case X86::ADD8ri:    NewOpc = X86::ADD8i8;    break;
    case X86::ADD16ri:   NewOpc = X86::ADD16i16;  break;
    case X86::ADD32ri:   NewOpc = X86::ADD32i32;  break;
    case X86::ADD64ri32:
      NewOpc = X86::ADD64i32;
      break;
    case X86::AND8ri:    NewOpc = X86::AND8i8;    break;
    case X86::AND16ri:   NewOpc = X86::AND16i16;  break;
    case X86::AND32ri:   NewOpc = X86::AND32i32;  break;
    case X86::AND64ri32: NewOpc = X86::AND64i32;  break;
    case X86::CMP8ri:    NewOpc = X86::CMP8i8;    break;
    case X86::CMP16ri:   NewOpc = X86::CMP16i16;  break;
    case X86::CMP32ri:   NewOpc = X86::CMP32i32;  break;
    case X86::CMP64ri32: NewOpc = X86::CMP64i32;  break;
    case X86::OR8ri:     NewOpc = X86::OR8i8;     break;
    case X86::OR16ri:    NewOpc = X86::OR16i16;   break;
    case X86::OR32ri:    NewOpc = X86::OR32i32;   break;
    case X86::OR64ri32:  NewOpc = X86::OR64i32;   break;
    case X86::SBB8ri:    NewOpc = X86::SBB8i8;    break;
    case X86::SBB16ri:   NewOpc = X86::SBB16i16;  break;
    case X86::SBB32ri:   NewOpc = X86::SBB32i32;  break;
    case X86::SBB64ri32: NewOpc = X86::SBB64i32;  break;
    case X86::SUB8ri:    NewOpc = X86::SUB8i8;    break;
    case X86::SUB16ri:   NewOpc = X86::SUB16i16;  break;
    case X86::SUB32ri:   NewOpc = X86::SUB32i32;  break;
    case X86::SUB64ri32: NewOpc = X86::SUB64i32;  break;
    case X86::TEST8ri:   NewOpc = X86::TEST8i8;   break;
    case X86::TEST16ri:  NewOpc = X86::TEST16i16; break;
    case X86::TEST32ri:  NewOpc = X86::TEST32i32; break;
    case X86::TEST64ri32: NewOpc = X86::TEST64i32; break;
    case X86::XOR8ri:    NewOpc = X86::XOR8i8;    break;
    case X86::XOR16ri:   NewOpc = X86::XOR16i16;  break;
    case X86::XOR32ri:   NewOpc = X86::XOR32i32;  break;
    case X86::XOR64ri32: NewOpc = X86::XOR64i32;  break;
    }
    SimplifyShortImmForm(OutMI, NewOpc);
    break;
  }

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;
  }
}

void X86AsmPrinter::LowerTlsAddr(X86MCInstLower &MCInstLowering,
                                 const MachineInstr &MI) {
  bool Is64Bits = MI.getOpcode() == X86::TLS_addr64 ||
                  MI.getOpcode() == X86::TLS_base_addr64;
  MCContext &Ctx = OutStreamer->getContext();

  MCSymbolRefExpr::VariantKind SRVK;
  switch (MI.getOpcode()) {
  case X86::TLS_addr32:
  case X86::TLS_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSGD;
    break;
  case X86::TLS_base_addr32:
    SRVK = MCSymbolRefExpr::VK_TLSLDM;
    break;
  case X86::TLS_base_addr64:
    SRVK = MCSymbolRefExpr::VK_TLSLD;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  const MCSymbolRefExpr *Sym = MCSymbolRefExpr::create(
      MCInstLowering.GetSymbolFromOperand(MI.getOperand(3)), SRVK, Ctx);

  // As of binutils 2.32, ld reports a bogus TLS relaxation error when it
  // attempts to relax a GD/LD code sequence that uses R_X86_64_GOTPCREL
  // (instead of R_X86_64_GOTPCRELX) to IE/LE (binutils PR24784). Work around
  // the bug by only using the GOT when GOTPCRELX is enabled.
  // TODO: Delete the workaround when GOTPCRELX becomes commonplace.
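  // For reference, the canonical 64-bit general-dynamic sequence emitted
  // below (without the GOT indirection) is:
  //   .byte 0x66
  //   leaq x@tlsgd(%rip), %rdi
  //   .word 0x6666
  //   rex64
  //   call __tls_get_addr@PLT
  // The 0x66 padding bytes keep the sequence at the fixed size the linker
  // expects when relaxing it to the initial-exec or local-exec model.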
  bool UseGot = MMI->getModule()->getRtLibUseGOT() &&
                Ctx.getAsmInfo()->canRelaxRelocations();

  if (Is64Bits) {
    bool NeedsPadding = SRVK == MCSymbolRefExpr::VK_TLSGD;
    if (NeedsPadding)
      EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
    EmitAndCountInstruction(MCInstBuilder(X86::LEA64r)
                                .addReg(X86::RDI)
                                .addReg(X86::RIP)
                                .addImm(1)
                                .addReg(0)
                                .addExpr(Sym)
                                .addReg(0));
    const MCSymbol *TlsGetAddr = Ctx.getOrCreateSymbol("__tls_get_addr");
    if (NeedsPadding) {
      if (!UseGot)
        EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
      EmitAndCountInstruction(MCInstBuilder(X86::DATA16_PREFIX));
      EmitAndCountInstruction(MCInstBuilder(X86::REX64_PREFIX));
    }
    if (UseGot) {
      const MCExpr *Expr = MCSymbolRefExpr::create(
          TlsGetAddr, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
      EmitAndCountInstruction(MCInstBuilder(X86::CALL64m)
                                  .addReg(X86::RIP)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Expr)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(
          MCInstBuilder(X86::CALL64pcrel32)
              .addExpr(MCSymbolRefExpr::create(TlsGetAddr,
                                               MCSymbolRefExpr::VK_PLT, Ctx)));
    }
  } else {
    if (SRVK == MCSymbolRefExpr::VK_TLSGD && !UseGot) {
      EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
                                  .addReg(X86::EAX)
                                  .addReg(0)
                                  .addImm(1)
                                  .addReg(X86::EBX)
                                  .addExpr(Sym)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(MCInstBuilder(X86::LEA32r)
                                  .addReg(X86::EAX)
                                  .addReg(X86::EBX)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Sym)
                                  .addReg(0));
    }

    const MCSymbol *TlsGetAddr = Ctx.getOrCreateSymbol("___tls_get_addr");
    if (UseGot) {
      const MCExpr *Expr =
          MCSymbolRefExpr::create(TlsGetAddr, MCSymbolRefExpr::VK_GOT, Ctx);
      EmitAndCountInstruction(MCInstBuilder(X86::CALL32m)
                                  .addReg(X86::EBX)
                                  .addImm(1)
                                  .addReg(0)
                                  .addExpr(Expr)
                                  .addReg(0));
    } else {
      EmitAndCountInstruction(
          MCInstBuilder(X86::CALLpcrel32)
              .addExpr(MCSymbolRefExpr::create(TlsGetAddr,
                                               MCSymbolRefExpr::VK_PLT, Ctx)));
    }
  }
}

/// Emit the largest nop instruction smaller than or equal to \p NumBytes
/// bytes. Return the size of the nop emitted.
static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                        const MCSubtargetInfo &STI) {
  // This works only for 64-bit. For 32-bit we would additionally have to
  // check whether the CPU supports multi-byte nops.
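  // The selected opcodes correspond to the recommended multi-byte nop
  // sequences, e.g. for sizes 1-4:
  //   1: 90           nop
  //   2: 66 90        xchgw %ax, %ax
  //   3: 0f 1f 00     nopl (%rax)
  //   4: 0f 1f 40 08  nopl 8(%rax)
  // Longer requests add an index register, a 32-bit displacement, a segment
  // override, and 0x66 prefix bytes, reaching up to 15 bytes per instruction.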
  assert(Is64Bit && "EmitNops only supports X86-64");

  unsigned NopSize;
  unsigned Opc, BaseReg, ScaleVal, IndexReg, Displacement, SegmentReg;
  IndexReg = Displacement = SegmentReg = 0;
  BaseReg = X86::RAX;
  ScaleVal = 1;
  switch (NumBytes) {
  case 0:
    llvm_unreachable("Zero nops?");
    break;
  case 1:
    NopSize = 1;
    Opc = X86::NOOP;
    break;
  case 2:
    NopSize = 2;
    Opc = X86::XCHG16ar;
    break;
  case 3:
    NopSize = 3;
    Opc = X86::NOOPL;
    break;
  case 4:
    NopSize = 4;
    Opc = X86::NOOPL;
    Displacement = 8;
    break;
  case 5:
    NopSize = 5;
    Opc = X86::NOOPL;
    Displacement = 8;
    IndexReg = X86::RAX;
    break;
  case 6:
    NopSize = 6;
    Opc = X86::NOOPW;
    Displacement = 8;
    IndexReg = X86::RAX;
    break;
  case 7:
    NopSize = 7;
    Opc = X86::NOOPL;
    Displacement = 512;
    break;
  case 8:
    NopSize = 8;
    Opc = X86::NOOPL;
    Displacement = 512;
    IndexReg = X86::RAX;
    break;
  case 9:
    NopSize = 9;
    Opc = X86::NOOPW;
    Displacement = 512;
    IndexReg = X86::RAX;
    break;
  default:
    NopSize = 10;
    Opc = X86::NOOPW;
    Displacement = 512;
    IndexReg = X86::RAX;
    SegmentReg = X86::CS;
    break;
  }

  unsigned NumPrefixes = std::min(NumBytes - NopSize, 5U);
  NopSize += NumPrefixes;
  for (unsigned i = 0; i != NumPrefixes; ++i)
    OS.EmitBytes("\x66");

  switch (Opc) {
  default: llvm_unreachable("Unexpected opcode");
  case X86::NOOP:
    OS.EmitInstruction(MCInstBuilder(Opc), STI);
    break;
  case X86::XCHG16ar:
    OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX).addReg(X86::AX), STI);
    break;
  case X86::NOOPL:
  case X86::NOOPW:
    OS.EmitInstruction(MCInstBuilder(Opc)
                           .addReg(BaseReg)
                           .addImm(ScaleVal)
                           .addReg(IndexReg)
                           .addImm(Displacement)
                           .addReg(SegmentReg),
                       STI);
    break;
  }
  assert(NopSize <= NumBytes && "We overemitted?");
  return NopSize;
}

/// Emit the optimal amount of multi-byte nops on X86.
static void EmitNops(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
                     const MCSubtargetInfo &STI) {
  unsigned NopsToEmit = NumBytes;
  (void)NopsToEmit;
  while (NumBytes) {
    NumBytes -= EmitNop(OS, NumBytes, Is64Bit, STI);
    assert(NopsToEmit >= NumBytes && "Emitted more than I asked for!");
  }
}

void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64");

  StatepointOpers SOpers(&MI);
  if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
    EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(),
             getSubtargetInfo());
  } else {
    // Lower the call target and choose the correct opcode.
    const MachineOperand &CallTarget = SOpers.getCallTarget();
    MCOperand CallTargetMCOp;
    unsigned CallOpcode;
    switch (CallTarget.getType()) {
    case MachineOperand::MO_GlobalAddress:
    case MachineOperand::MO_ExternalSymbol:
      CallTargetMCOp = MCIL.LowerSymbolOperand(
          CallTarget, MCIL.GetSymbolFromOperand(CallTarget));
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // address. You'll fail asserts during load & relocation if this
      // symbol is too far away.
      // (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Immediate:
      CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
      CallOpcode = X86::CALL64pcrel32;
      // Currently, we only support relative addressing with statepoints.
      // Otherwise, we'll need a scratch register to hold the target
      // immediate. You'll fail asserts during load & relocation if this
      // address is too far away. (TODO: support non-relative addressing)
      break;
    case MachineOperand::MO_Register:
      // FIXME: Add retpoline support and remove this.
      if (Subtarget->useRetpolineIndirectCalls())
        report_fatal_error("Lowering register statepoints with retpoline not "
                           "yet implemented.");
      CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
      CallOpcode = X86::CALL64r;
      break;
    default:
      llvm_unreachable("Unsupported operand type in statepoint call target");
      break;
    }

    // Emit the call.
    MCInst CallInst;
    CallInst.setOpcode(CallOpcode);
    CallInst.addOperand(CallTargetMCOp);
    OutStreamer->EmitInstruction(CallInst, getSubtargetInfo());
  }

  // Record our statepoint node in the same section used by STACKMAP
  // and PATCHPOINT.
  SM.recordStatepoint(MI);
}

void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
                                     X86MCInstLower &MCIL) {
  // FAULTING_LOAD_OP <def>, <faulting kind>, <MBB handler>,
  //                  <opcode>, <operands>

  unsigned DefRegister = FaultingMI.getOperand(0).getReg();
  FaultMaps::FaultKind FK =
      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
  unsigned Opcode = FaultingMI.getOperand(3).getImm();
  unsigned OperandsBeginIdx = 4;

  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
  FM.recordFaultingOp(FK, HandlerLabel);

  MCInst MI;
  MI.setOpcode(Opcode);

  if (DefRegister != X86::NoRegister)
    MI.addOperand(MCOperand::createReg(DefRegister));

  for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx,
            E = FaultingMI.operands_end();
       I != E; ++I)
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I))
      MI.addOperand(MaybeOperand.getValue());

  OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
  OutStreamer->EmitInstruction(MI, getSubtargetInfo());
}

void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
                                     X86MCInstLower &MCIL) {
  bool Is64Bits = Subtarget->is64Bit();
  MCContext &Ctx = OutStreamer->getContext();
  MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__");
  const MCSymbolRefExpr *Op =
      MCSymbolRefExpr::create(fentry, MCSymbolRefExpr::VK_None, Ctx);

  EmitAndCountInstruction(
      MCInstBuilder(Is64Bits ?
                        X86::CALL64pcrel32 : X86::CALLpcrel32)
          .addExpr(Op));
}

void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
                                      X86MCInstLower &MCIL) {
  // PATCHABLE_OP minsize, opcode, operands

  unsigned MinSize = MI.getOperand(0).getImm();
  unsigned Opcode = MI.getOperand(1).getImm();

  MCInst MCI;
  MCI.setOpcode(Opcode);
  for (auto &MO : make_range(MI.operands_begin() + 2, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      MCI.addOperand(MaybeOperand.getValue());

  SmallString<256> Code;
  SmallVector<MCFixup, 4> Fixups;
  raw_svector_ostream VecOS(Code);
  CodeEmitter->encodeInstruction(MCI, VecOS, Fixups, getSubtargetInfo());

  if (Code.size() < MinSize) {
    if (MinSize == 2 && Opcode == X86::PUSH64r) {
      // This is an optimization that lets us get away without emitting a nop
      // in many cases.
      //
      // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %r9) takes
      // two bytes too, so the check on MinSize is important.
      MCI.setOpcode(X86::PUSH64rmr);
    } else {
      unsigned NopSize = EmitNop(*OutStreamer, MinSize, Subtarget->is64Bit(),
                                 getSubtargetInfo());
      assert(NopSize == MinSize && "Could not implement MinSize!");
      (void)NopSize;
    }
  }

  OutStreamer->EmitInstruction(MCI, getSubtargetInfo());
}

// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
void X86AsmPrinter::LowerSTACKMAP(const MachineInstr &MI) {
  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
  SM.recordStackMap(MI);
  unsigned NumShadowBytes = MI.getOperand(1).getImm();
  SMShadowTracker.reset(NumShadowBytes);
}

// Lower a patchpoint of the form:
// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "Patchpoint currently only supports X86-64");

  SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

  SM.recordPatchPoint(MI);

  PatchPointOpers opers(&MI);
  unsigned ScratchIdx = opers.getNextScratchIdx();
  unsigned EncodedBytes = 0;
  const MachineOperand &CalleeMO = opers.getCallTarget();

  // Check for a null target. If the target is non-null (i.e. is non-zero or
  // is symbolic) then emit a call.
  if (!(CalleeMO.isImm() && !CalleeMO.getImm())) {
    MCOperand CalleeMCOp;
    switch (CalleeMO.getType()) {
    default:
      // FIXME: Add a verifier check for bad callee types.
      llvm_unreachable("Unrecognized callee operand type.");
    case MachineOperand::MO_Immediate:
      if (CalleeMO.getImm())
        CalleeMCOp = MCOperand::createImm(CalleeMO.getImm());
      break;
    case MachineOperand::MO_ExternalSymbol:
    case MachineOperand::MO_GlobalAddress:
      CalleeMCOp = MCIL.LowerSymbolOperand(
          CalleeMO, MCIL.GetSymbolFromOperand(CalleeMO));
      break;
    }

    // Emit MOV to materialize the target address and the CALL to target.
    // This is encoded with 12-13 bytes, depending on which register is used.
    unsigned ScratchReg = MI.getOperand(ScratchIdx).getReg();
    if (X86II::isX86_64ExtendedReg(ScratchReg))
      EncodedBytes = 13;
    else
      EncodedBytes = 12;

    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64ri).addReg(ScratchReg).addOperand(CalleeMCOp));
    // FIXME: Add retpoline support and remove this.
    if (Subtarget->useRetpolineIndirectCalls())
      report_fatal_error(
          "Lowering patchpoint with retpoline not yet implemented.");
    EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(ScratchReg));
  }

  // Emit padding.
  unsigned NumBytes = opers.getNumPatchBytes();
  assert(NumBytes >= EncodedBytes &&
         "Patchpoint can't request size less than the length of a call.");

  EmitNops(*OutStreamer, NumBytes - EncodedBytes, Subtarget->is64Bit(),
           getSubtargetInfo());
}

void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
                                              X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "XRay custom events only support X86-64");

  // We want to emit the following pattern, which follows the x86 calling
  // convention to prepare for the trampoline call to be patched in.
  //
  //   .p2align 1, ...
  // .Lxray_event_sled_N:
  //   jmp +N                        // jump across the instrumentation sled
  //   ...                           // set up arguments in register
  //   callq __xray_CustomEvent@plt  // force dependency to symbol
  //   ...
  //   <jump here>
  //
  // After patching, it would look something like:
  //
  //   nopw (2-byte nop)
  //   ...
  //   callq __xray_CustomEvent  // already lowered
  //   ...
  //
  // ---
  // First we emit the label and the jump.
  auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
  OutStreamer->AddComment("# XRay Custom Event Log");
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBinaryData("\xeb\x0f");

  // The instrumentation expects the two event arguments in %rdi and %rsi
  // (the SysV convention), so those are the only registers we work with.
  unsigned DestRegs[] = {X86::RDI, X86::RSI};
  bool UsedMask[] = {false, false};
  // Filled out in the loop.
  unsigned SrcRegs[] = {0, 0};

  // Then we put the operands in the %rdi and %rsi registers. We spill the
  // values in the registers before we clobber them, and mark them as used in
  // UsedMask. In case the arguments are already in the correct register, we
  // emit nops appropriately sized to keep the sled the same size in every
  // situation.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) {
      assert(Op->isReg() && "Only support arguments in registers");
      SrcRegs[I] = Op->getReg();
      if (SrcRegs[I] != DestRegs[I]) {
        UsedMask[I] = true;
        EmitAndCountInstruction(
            MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
      } else {
        EmitNops(*OutStreamer, 4, Subtarget->is64Bit(), getSubtargetInfo());
      }
    }

  // Now that the register values are stashed, mov arguments into place.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (SrcRegs[I] != DestRegs[I])
      EmitAndCountInstruction(
          MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));

  // We emit a hard dependency on the __xray_CustomEvent symbol, which is the
  // name of the trampoline to be implemented by the XRay runtime.
  auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
  MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
  if (isPositionIndependent())
    TOp.setTargetFlags(X86II::MO_PLT);

  // Emit the call instruction.
  EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
                              .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));

  // Restore caller-saved and used registers.
  for (unsigned I = sizeof UsedMask; I-- > 0;)
    if (UsedMask[I])
      EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
    else
      EmitNops(*OutStreamer, 1, Subtarget->is64Bit(), getSubtargetInfo());

  OutStreamer->AddComment("xray custom event end.");

  // Record the sled version. Older versions of this sled were spelled
  // differently, so we let the runtime handle the different offsets we're
  // using.
  recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 1);
}

void X86AsmPrinter::LowerPATCHABLE_TYPED_EVENT_CALL(const MachineInstr &MI,
                                                    X86MCInstLower &MCIL) {
  assert(Subtarget->is64Bit() && "XRay typed events only support X86-64");

  // We want to emit the following pattern, which follows the x86 calling
  // convention to prepare for the trampoline call to be patched in.
  //
  //   .p2align 1, ...
  // .Lxray_event_sled_N:
  //   jmp +N                       // jump across the instrumentation sled
  //   ...                          // set up arguments in register
  //   callq __xray_TypedEvent@plt  // force dependency to symbol
  //   ...
  //   <jump here>
  //
  // After patching, it would look something like:
  //
  //   nopw (2-byte nop)
  //   ...
  //   callq __xray_TypedEvent  // already lowered
  //   ...
  //
  // ---
  // First we emit the label and the jump.
  auto CurSled = OutContext.createTempSymbol("xray_typed_event_sled_", true);
  OutStreamer->AddComment("# XRay Typed Event Log");
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBinaryData("\xeb\x14");

  // An x86-64 convention may place three arguments into %rcx, %rdx, and %r8,
  // so we'll work with those. Or we may be called via SystemV, in which case
  // we don't have to do any translation.
  unsigned DestRegs[] = {X86::RDI, X86::RSI, X86::RDX};
  bool UsedMask[] = {false, false, false};

  // Will fill out the src regs in the loop.
  unsigned SrcRegs[] = {0, 0, 0};

  // Then we put the operands in the SystemV registers. We spill the values in
  // the registers before we clobber them, and mark them as used in UsedMask.
  // In case the arguments are already in the correct register, we emit nops
  // appropriately sized to keep the sled the same size in every situation.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) {
      // TODO: Is register-only support adequate?
      assert(Op->isReg() && "Only supports arguments in registers");
      SrcRegs[I] = Op->getReg();
      if (SrcRegs[I] != DestRegs[I]) {
        UsedMask[I] = true;
        EmitAndCountInstruction(
            MCInstBuilder(X86::PUSH64r).addReg(DestRegs[I]));
      } else {
        EmitNops(*OutStreamer, 4, Subtarget->is64Bit(), getSubtargetInfo());
      }
    }

  // In the above loop we only stash all of the destination registers or emit
  // nops if the arguments are already in the right place. Doing the actual
  // moving is postponed until after all the registers are stashed so nothing
  // is clobbered. We've already added nops to account for the size of mov and
  // push if the register is in the right place, so we only have to worry about
  // emitting movs.
  for (unsigned I = 0; I < MI.getNumOperands(); ++I)
    if (UsedMask[I])
      EmitAndCountInstruction(
          MCInstBuilder(X86::MOV64rr).addReg(DestRegs[I]).addReg(SrcRegs[I]));

  // We emit a hard dependency on the __xray_TypedEvent symbol, which is the
  // name of the trampoline to be implemented by the XRay runtime.
  auto TSym = OutContext.getOrCreateSymbol("__xray_TypedEvent");
  MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
  if (isPositionIndependent())
    TOp.setTargetFlags(X86II::MO_PLT);

  // Emit the call instruction.
  EmitAndCountInstruction(MCInstBuilder(X86::CALL64pcrel32)
                              .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));

  // Restore caller-saved and used registers.
  for (unsigned I = sizeof UsedMask; I-- > 0;)
    if (UsedMask[I])
      EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(DestRegs[I]));
    else
      EmitNops(*OutStreamer, 1, Subtarget->is64Bit(), getSubtargetInfo());

  OutStreamer->AddComment("xray typed event end.");

  // Record the sled version.
  recordSled(CurSled, MI, SledKind::TYPED_EVENT, 0);
}

void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
                                                  X86MCInstLower &MCIL) {
  // We want to emit the following pattern:
  //
  //   .p2align 1, ...
  // .Lxray_sled_N:
  //   jmp .tmpN
  //   # 9 bytes worth of noops
  //
  // We need the 9 bytes because at runtime, we'd be patching over the full 11
  // bytes with the following pattern:
  //
  //   mov %r10, <function id, 32-bit>   // 6 bytes
  //   call <relative offset, 32-bits>   // 5 bytes
  //
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBytes("\xeb\x09");
  EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
  recordSled(CurSled, MI, SledKind::FUNCTION_ENTER);
}

void X86AsmPrinter::LowerPATCHABLE_RET(const MachineInstr &MI,
                                       X86MCInstLower &MCIL) {
  // Since PATCHABLE_RET takes the opcode of the return statement as an
  // argument, we use that to emit the correct form of the RET that we want.
  // i.e. when we see this:
  //
  //   PATCHABLE_RET X86::RET ...
  //
  // We should emit the RET followed by sleds.
  //
  //   .p2align 1, ...
  // .Lxray_sled_N:
  //   ret  # or equivalent instruction
  //   # 10 bytes worth of noops
  //
  // This just makes sure that the alignment for the next instruction is 2.
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);
  unsigned OpCode = MI.getOperand(0).getImm();
  MCInst Ret;
  Ret.setOpcode(OpCode);
  for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      Ret.addOperand(MaybeOperand.getValue());
  OutStreamer->EmitInstruction(Ret, getSubtargetInfo());
  EmitNops(*OutStreamer, 10, Subtarget->is64Bit(), getSubtargetInfo());
  recordSled(CurSled, MI, SledKind::FUNCTION_EXIT);
}

void X86AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI,
                                             X86MCInstLower &MCIL) {
  // Like PATCHABLE_RET, we have the actual instruction in the operands to this
  // instruction so we lower that particular instruction and its operands.
  // Unlike PATCHABLE_RET though, we put the sled before the JMP, much like how
  // we do it for PATCHABLE_FUNCTION_ENTER. The sled should be very similar to
  // the PATCHABLE_FUNCTION_ENTER case, followed by the lowering of the actual
  // tail call much like how we have it in PATCHABLE_RET.
  auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
  OutStreamer->EmitCodeAlignment(2);
  OutStreamer->EmitLabel(CurSled);
  auto Target = OutContext.createTempSymbol();

  // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset
  // as an operand (computed as an offset from the jmp instruction).
  // FIXME: Find another less hacky way to force the relative jump.
  OutStreamer->EmitBytes("\xeb\x09");
  EmitNops(*OutStreamer, 9, Subtarget->is64Bit(), getSubtargetInfo());
  OutStreamer->EmitLabel(Target);
  recordSled(CurSled, MI, SledKind::TAIL_CALL);

  unsigned OpCode = MI.getOperand(0).getImm();
  MCInst TC;
  TC.setOpcode(OpCode);

  // Before emitting the instruction, add a comment to indicate that this is
  // indeed a tail call.
  OutStreamer->AddComment("TAILCALL");
  for (auto &MO : make_range(MI.operands_begin() + 1, MI.operands_end()))
    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, MO))
      TC.addOperand(MaybeOperand.getValue());
  OutStreamer->EmitInstruction(TC, getSubtargetInfo());
}

// Returns the instruction preceding MBBI in the MachineFunction.
// If MBBI is the first instruction of the first basic block, returns null.
static MachineBasicBlock::const_iterator
PrevCrossBBInst(MachineBasicBlock::const_iterator MBBI) {
  const MachineBasicBlock *MBB = MBBI->getParent();
  while (MBBI == MBB->begin()) {
    if (MBB == &MBB->getParent()->front())
      return MachineBasicBlock::const_iterator();
    MBB = MBB->getPrevNode();
    MBBI = MBB->end();
  }
  --MBBI;
  return MBBI;
}

static const Constant *getConstantFromPool(const MachineInstr &MI,
                                           const MachineOperand &Op) {
  if (!Op.isCPI() || Op.getOffset() != 0)
    return nullptr;

  ArrayRef<MachineConstantPoolEntry> Constants =
      MI.getParent()->getParent()->getConstantPool()->getConstants();
  const MachineConstantPoolEntry &ConstantEntry = Constants[Op.getIndex()];

  // Bail if this is a machine constant pool entry, we won't be able to dig out
  // anything useful.
  if (ConstantEntry.isMachineConstantPoolEntry())
    return nullptr;

  const Constant *C = ConstantEntry.Val.ConstVal;
  assert((!C || ConstantEntry.getType() == C->getType()) &&
         "Expected a constant of the same type!");
  return C;
}

static std::string getShuffleComment(const MachineInstr *MI, unsigned SrcOp1Idx,
                                     unsigned SrcOp2Idx, ArrayRef<int> Mask) {
  std::string Comment;

  // Compute the name for a register. This is really goofy because we have
  // multiple instruction printers that could (in theory) use different
  // names. Fortunately most people use the ATT style (outside of Windows),
  // and they actually agree on register naming here. Ultimately, this is
  // a comment, and so it's OK if it isn't perfect.
  auto GetRegisterName = [](unsigned RegNum) -> StringRef {
    return X86ATTInstPrinter::getRegisterName(RegNum);
  };

  const MachineOperand &DstOp = MI->getOperand(0);
  const MachineOperand &SrcOp1 = MI->getOperand(SrcOp1Idx);
  const MachineOperand &SrcOp2 = MI->getOperand(SrcOp2Idx);

  StringRef DstName = DstOp.isReg() ? GetRegisterName(DstOp.getReg()) : "mem";
  StringRef Src1Name =
      SrcOp1.isReg() ? GetRegisterName(SrcOp1.getReg()) : "mem";
  StringRef Src2Name =
      SrcOp2.isReg() ? GetRegisterName(SrcOp2.getReg()) : "mem";

  // One source operand, fix the mask to print all elements in one span.
  SmallVector<int, 8> ShuffleMask(Mask.begin(), Mask.end());
  if (Src1Name == Src2Name)
    for (int i = 0, e = ShuffleMask.size(); i != e; ++i)
      if (ShuffleMask[i] >= e)
        ShuffleMask[i] -= e;

  raw_string_ostream CS(Comment);
  CS << DstName;

  // Handle AVX512 MASK/MASKZ write mask comments.
  // MASK: zmmX {%kY}
  // MASKZ: zmmX {%kY} {z}
  if (SrcOp1Idx > 1) {
    assert((SrcOp1Idx == 2 || SrcOp1Idx == 3) && "Unexpected writemask");

    const MachineOperand &WriteMaskOp = MI->getOperand(SrcOp1Idx - 1);
    if (WriteMaskOp.isReg()) {
      CS << " {%" << GetRegisterName(WriteMaskOp.getReg()) << "}";

      if (SrcOp1Idx == 2) {
        CS << " {z}";
      }
    }
  }

  CS << " = ";

  for (int i = 0, e = ShuffleMask.size(); i != e; ++i) {
    if (i != 0)
      CS << ",";
    if (ShuffleMask[i] == SM_SentinelZero) {
      CS << "zero";
      continue;
    }

    // Otherwise, it must come from src1 or src2. Print the span of elements
    // that comes from this src.
    bool isSrc1 = ShuffleMask[i] < (int)e;
    CS << (isSrc1 ?
               Src1Name : Src2Name) << '[';

    bool IsFirst = true;
    while (i != e && ShuffleMask[i] != SM_SentinelZero &&
           (ShuffleMask[i] < (int)e) == isSrc1) {
      if (!IsFirst)
        CS << ',';
      else
        IsFirst = false;
      if (ShuffleMask[i] == SM_SentinelUndef)
        CS << "u";
      else
        CS << ShuffleMask[i] % (int)e;
      ++i;
    }
    CS << ']';
    --i; // For loop increments element #.
  }
  CS.flush();

  return Comment;
}

static void printConstant(const APInt &Val, raw_ostream &CS) {
  if (Val.getBitWidth() <= 64) {
    CS << Val.getZExtValue();
  } else {
    // Print a multi-word constant as (w0,w1).
    CS << "(";
    for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
      if (i > 0)
        CS << ",";
      CS << Val.getRawData()[i];
    }
    CS << ")";
  }
}

static void printConstant(const APFloat &Flt, raw_ostream &CS) {
  SmallString<32> Str;
  // Force scientific notation to distinguish from integers.
  Flt.toString(Str, 0, 0);
  CS << Str;
}

static void printConstant(const Constant *COp, raw_ostream &CS) {
  if (isa<UndefValue>(COp)) {
    CS << "u";
  } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
    printConstant(CI->getValue(), CS);
  } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
    printConstant(CF->getValueAPF(), CS);
  } else {
    CS << "?";
  }
}

void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) {
  assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
  assert(getSubtarget().isOSWindows() && "SEH_ instruction Windows only");
  const X86RegisterInfo *RI =
      MF->getSubtarget<X86Subtarget>().getRegisterInfo();

  // Use the .cv_fpo directives if we're emitting CodeView on 32-bit x86.
  if (EmitFPOData) {
    X86TargetStreamer *XTS =
        static_cast<X86TargetStreamer *>(OutStreamer->getTargetStreamer());
    switch (MI->getOpcode()) {
    case X86::SEH_PushReg:
      XTS->emitFPOPushReg(MI->getOperand(0).getImm());
      break;
    case X86::SEH_StackAlloc:
      XTS->emitFPOStackAlloc(MI->getOperand(0).getImm());
      break;
    case X86::SEH_StackAlign:
      XTS->emitFPOStackAlign(MI->getOperand(0).getImm());
      break;
    case X86::SEH_SetFrame:
      assert(MI->getOperand(1).getImm() == 0 &&
             ".cv_fpo_setframe takes no offset");
      XTS->emitFPOSetFrame(MI->getOperand(0).getImm());
      break;
    case X86::SEH_EndPrologue:
      XTS->emitFPOEndPrologue();
      break;
    case X86::SEH_SaveReg:
    case X86::SEH_SaveXMM:
    case X86::SEH_PushFrame:
      llvm_unreachable("SEH_ directive incompatible with FPO");
      break;
    default:
      llvm_unreachable("expected SEH_ instruction");
    }
    return;
  }

  // Otherwise, use the .seh_ directives for all other Windows platforms.
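  // A typical prologue then yields a directive stream along the lines of
  // .seh_pushreg, .seh_stackalloc, .seh_setframe, and finally
  // .seh_endprologue, which the MC layer turns into the .xdata/.pdata unwind
  // tables.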
  switch (MI->getOpcode()) {
  case X86::SEH_PushReg:
    OutStreamer->EmitWinCFIPushReg(
        RI->getSEHRegNum(MI->getOperand(0).getImm()));
    break;

  case X86::SEH_SaveReg:
    OutStreamer->EmitWinCFISaveReg(RI->getSEHRegNum(MI->getOperand(0).getImm()),
                                   MI->getOperand(1).getImm());
    break;

  case X86::SEH_SaveXMM:
    OutStreamer->EmitWinCFISaveXMM(RI->getSEHRegNum(MI->getOperand(0).getImm()),
                                   MI->getOperand(1).getImm());
    break;

  case X86::SEH_StackAlloc:
    OutStreamer->EmitWinCFIAllocStack(MI->getOperand(0).getImm());
    break;

  case X86::SEH_SetFrame:
    OutStreamer->EmitWinCFISetFrame(
        RI->getSEHRegNum(MI->getOperand(0).getImm()),
        MI->getOperand(1).getImm());
    break;

  case X86::SEH_PushFrame:
    OutStreamer->EmitWinCFIPushFrame(MI->getOperand(0).getImm());
    break;

  case X86::SEH_EndPrologue:
    OutStreamer->EmitWinCFIEndProlog();
    break;

  default:
    llvm_unreachable("expected SEH_ instruction");
  }
}

static unsigned getRegisterWidth(const MCOperandInfo &Info) {
  if (Info.RegClass == X86::VR128RegClassID ||
      Info.RegClass == X86::VR128XRegClassID)
    return 128;
  if (Info.RegClass == X86::VR256RegClassID ||
      Info.RegClass == X86::VR256XRegClassID)
    return 256;
  if (Info.RegClass == X86::VR512RegClassID)
    return 512;
  llvm_unreachable("Unknown register class!");
}

void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  X86MCInstLower MCInstLowering(*MF, *this);
  const X86RegisterInfo *RI =
      MF->getSubtarget<X86Subtarget>().getRegisterInfo();

  // Add a comment about EVEX-2-VEX compression for AVX-512 instrs that
  // are compressed from EVEX encoding to VEX encoding.
  if (TM.Options.MCOptions.ShowMCEncoding) {
    if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX)
      OutStreamer->AddComment("EVEX TO VEX Compression ", false);
  }

  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
    llvm_unreachable("Should be handled target independently");

  // Emit nothing here but a comment if we can.
  case X86::Int_MemBarrier:
    OutStreamer->emitRawComment("MEMBARRIER");
    return;

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Lower these as normal, but add some comments.
    unsigned Reg = MI->getOperand(0).getReg();
    OutStreamer->AddComment(StringRef("eh_return, addr: %") +
                            X86ATTInstPrinter::getRegisterName(Reg));
    break;
  }
  case X86::CLEANUPRET: {
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("CLEANUPRET");
    break;
  }

  case X86::CATCHRET: {
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("CATCHRET");
    break;
  }

  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::TAILJMPd:
  case X86::TAILJMPd_CC:
  case X86::TAILJMPr64:
  case X86::TAILJMPm64:
  case X86::TAILJMPd64:
  case X86::TAILJMPd64_CC:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPm64_REX:
    // Lower these as normal, but add some comments.
    OutStreamer->AddComment("TAILCALL");
    break;

  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
    return LowerTlsAddr(MCInstLowering, *MI);

  // Loading/storing mask pairs requires two kmov operations.
  case X86::MASKPAIR16LOAD: {
    int64_t Disp = MI->getOperand(1 + X86::AddrDisp).getImm();
    assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");
    const X86RegisterInfo *RI =
        MF->getSubtarget<X86Subtarget>().getRegisterInfo();
    unsigned Reg = MI->getOperand(0).getReg();
    unsigned Reg0 = RI->getSubReg(Reg, X86::sub_mask_0);
    unsigned Reg1 = RI->getSubReg(Reg, X86::sub_mask_1);

    // Load the first mask register.
    MCInstBuilder MIB = MCInstBuilder(X86::KMOVWkm);
    MIB.addReg(Reg0);
    for (int i = 0; i < X86::AddrNumOperands; ++i) {
      auto Op = MCInstLowering.LowerMachineOperand(MI, MI->getOperand(1 + i));
      MIB.addOperand(Op.getValue());
    }
    EmitAndCountInstruction(MIB);

    // Load the second mask register of the pair.
    MIB = MCInstBuilder(X86::KMOVWkm);
    MIB.addReg(Reg1);
    for (int i = 0; i < X86::AddrNumOperands; ++i) {
      if (i == X86::AddrDisp) {
        MIB.addImm(Disp + 2);
      } else {
        auto Op = MCInstLowering.LowerMachineOperand(MI, MI->getOperand(1 + i));
        MIB.addOperand(Op.getValue());
      }
    }
    EmitAndCountInstruction(MIB);
    return;
  }

  case X86::MASKPAIR16STORE: {
    int64_t Disp = MI->getOperand(X86::AddrDisp).getImm();
    assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");
    const X86RegisterInfo *RI =
        MF->getSubtarget<X86Subtarget>().getRegisterInfo();
    unsigned Reg = MI->getOperand(X86::AddrNumOperands).getReg();
    unsigned Reg0 = RI->getSubReg(Reg, X86::sub_mask_0);
    unsigned Reg1 = RI->getSubReg(Reg, X86::sub_mask_1);

    // Store the first mask register.
    MCInstBuilder MIB = MCInstBuilder(X86::KMOVWmk);
    for (int i = 0; i < X86::AddrNumOperands; ++i) {
      auto Op = MCInstLowering.LowerMachineOperand(MI, MI->getOperand(i));
      MIB.addOperand(Op.getValue());
    }
    MIB.addReg(Reg0);
    EmitAndCountInstruction(MIB);

    // Store the second mask register of the pair.
    MIB = MCInstBuilder(X86::KMOVWmk);
    for (int i = 0; i < X86::AddrNumOperands; ++i) {
      if (i == X86::AddrDisp) {
        MIB.addImm(Disp + 2);
      } else {
        auto Op = MCInstLowering.LowerMachineOperand(MI, MI->getOperand(i));
        MIB.addOperand(Op.getValue());
      }
    }
    MIB.addReg(Reg1);
    EmitAndCountInstruction(MIB);
    return;
  }

  case X86::MOVPC32r: {
    // This is a pseudo op for a two instruction sequence with a label, which
    // looks like:
    //     call "L1$pb"
    // "L1$pb":
    //     popl %esi

    // Emit the call.
    MCSymbol *PICBase = MF->getPICBaseSymbol();
    // FIXME: We would like an efficient form for this, so we don't have to do
    // a lot of extra uniquing.
    EmitAndCountInstruction(
        MCInstBuilder(X86::CALLpcrel32)
            .addExpr(MCSymbolRefExpr::create(PICBase, OutContext)));

    const X86FrameLowering *FrameLowering =
        MF->getSubtarget<X86Subtarget>().getFrameLowering();
    bool hasFP = FrameLowering->hasFP(*MF);

    // TODO: This is needed only if we require precise CFA.
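    // The call above pushed the return address, so an SP-relative CFA is off
    // by one slot between the call and the pop below; adjust it here and undo
    // the adjustment after the pop.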
    bool HasActiveDwarfFrame = OutStreamer->getNumFrameInfos() &&
                               !OutStreamer->getDwarfFrameInfos().back().End;

    int stackGrowth = -RI->getSlotSize();

    if (HasActiveDwarfFrame && !hasFP) {
      OutStreamer->EmitCFIAdjustCfaOffset(-stackGrowth);
    }

    // Emit the label.
    OutStreamer->EmitLabel(PICBase);

    // popl $reg
    EmitAndCountInstruction(
        MCInstBuilder(X86::POP32r).addReg(MI->getOperand(0).getReg()));

    if (HasActiveDwarfFrame && !hasFP) {
      OutStreamer->EmitCFIAdjustCfaOffset(stackGrowth);
    }
    return;
  }

  case X86::ADD32ri: {
    // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
    if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
      break;

    // Okay, we have something like:
    //   EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

    // For this, we want to print something like:
    //   MYGLOBAL + (. - PICBASE)
    // However, we can't generate a ".", so just emit a new label here and
    // refer to it.
    MCSymbol *DotSym = OutContext.createTempSymbol();
    OutStreamer->EmitLabel(DotSym);

    // Now that we have emitted the label, lower the complex operand
    // expression.
    MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

    const MCExpr *DotExpr = MCSymbolRefExpr::create(DotSym, OutContext);
    const MCExpr *PICBase =
        MCSymbolRefExpr::create(MF->getPICBaseSymbol(), OutContext);
    DotExpr = MCBinaryExpr::createSub(DotExpr, PICBase, OutContext);

    DotExpr = MCBinaryExpr::createAdd(
        MCSymbolRefExpr::create(OpSym, OutContext), DotExpr, OutContext);

    EmitAndCountInstruction(MCInstBuilder(X86::ADD32ri)
                                .addReg(MI->getOperand(0).getReg())
                                .addReg(MI->getOperand(1).getReg())
                                .addExpr(DotExpr));
    return;
  }
  case TargetOpcode::STATEPOINT:
    return LowerSTATEPOINT(*MI, MCInstLowering);

  case TargetOpcode::FAULTING_OP:
    return LowerFAULTING_OP(*MI, MCInstLowering);

  case TargetOpcode::FENTRY_CALL:
    return LowerFENTRY_CALL(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_OP:
    return LowerPATCHABLE_OP(*MI, MCInstLowering);

  case TargetOpcode::STACKMAP:
    return LowerSTACKMAP(*MI);

  case TargetOpcode::PATCHPOINT:
    return LowerPATCHPOINT(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
    return LowerPATCHABLE_FUNCTION_ENTER(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_RET:
    return LowerPATCHABLE_RET(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_TAIL_CALL:
    return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_EVENT_CALL:
    return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);

  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
    return LowerPATCHABLE_TYPED_EVENT_CALL(*MI, MCInstLowering);

  case X86::MORESTACK_RET:
    EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
    return;

  case X86::MORESTACK_RET_RESTORE_R10:
    // Return, then restore R10.
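    // Emitted sequence (the mov is deliberately placed after the ret;
    // __morestack is expected to resume at it, skipping the ret, so that R10
    // is restored from RAX):
    //   ret
    //   movq %rax, %r10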
    EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
    EmitAndCountInstruction(
        MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
    return;

  case X86::SEH_PushReg:
  case X86::SEH_SaveReg:
  case X86::SEH_SaveXMM:
  case X86::SEH_StackAlloc:
  case X86::SEH_StackAlign:
  case X86::SEH_SetFrame:
  case X86::SEH_PushFrame:
  case X86::SEH_EndPrologue:
    EmitSEHInstruction(MI);
    return;

  case X86::SEH_Epilogue: {
    assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
    MachineBasicBlock::const_iterator MBBI(MI);
    // Check if preceded by a call and emit nop if so.
    for (MBBI = PrevCrossBBInst(MBBI);
         MBBI != MachineBasicBlock::const_iterator();
         MBBI = PrevCrossBBInst(MBBI)) {
      // Conservatively assume that pseudo instructions don't emit code and
      // keep looking for a call. We may emit an unnecessary nop in some
      // cases.
      if (!MBBI->isPseudo()) {
        if (MBBI->isCall())
          EmitAndCountInstruction(MCInstBuilder(X86::NOOP));
        break;
      }
    }
    return;
  }

  // Lower PSHUFB and VPERMILP normally but add a comment if we can find a
  // constant shuffle mask. We won't be able to do this at the MC layer
  // because the mask isn't an immediate.
  case X86::PSHUFBrm:
  case X86::VPSHUFBrm:
  case X86::VPSHUFBYrm:
  case X86::VPSHUFBZ128rm:
  case X86::VPSHUFBZ128rmk:
  case X86::VPSHUFBZ128rmkz:
  case X86::VPSHUFBZ256rm:
  case X86::VPSHUFBZ256rmk:
  case X86::VPSHUFBZ256rmkz:
  case X86::VPSHUFBZrm:
  case X86::VPSHUFBZrmk:
  case X86::VPSHUFBZrmkz: {
    if (!OutStreamer->isVerboseAsm())
      break;
    unsigned SrcIdx, MaskIdx;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::PSHUFBrm:
    case X86::VPSHUFBrm:
    case X86::VPSHUFBYrm:
    case X86::VPSHUFBZ128rm:
    case X86::VPSHUFBZ256rm:
    case X86::VPSHUFBZrm:
      SrcIdx = 1; MaskIdx = 5; break;
    case X86::VPSHUFBZ128rmkz:
    case X86::VPSHUFBZ256rmkz:
    case X86::VPSHUFBZrmkz:
      SrcIdx = 2; MaskIdx = 6; break;
    case X86::VPSHUFBZ128rmk:
    case X86::VPSHUFBZ256rmk:
    case X86::VPSHUFBZrmk:
      SrcIdx = 3; MaskIdx = 7; break;
    }

    assert(MI->getNumOperands() >= 6 &&
           "We should always have at least 6 operands!");

    const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
      SmallVector<int, 64> Mask;
      DecodePSHUFBMask(C, Width, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
    }
    break;
  }

  case X86::VPERMILPSrm:
  case X86::VPERMILPSYrm:
  case X86::VPERMILPSZ128rm:
  case X86::VPERMILPSZ128rmk:
  case X86::VPERMILPSZ128rmkz:
  case X86::VPERMILPSZ256rm:
  case X86::VPERMILPSZ256rmk:
  case X86::VPERMILPSZ256rmkz:
  case X86::VPERMILPSZrm:
  case X86::VPERMILPSZrmk:
  case X86::VPERMILPSZrmkz:
  case X86::VPERMILPDrm:
  case X86::VPERMILPDYrm:
  case X86::VPERMILPDZ128rm:
  case X86::VPERMILPDZ128rmk:
  case X86::VPERMILPDZ128rmkz:
  case X86::VPERMILPDZ256rm:
  case X86::VPERMILPDZ256rmk:
  case X86::VPERMILPDZ256rmkz:
  case X86::VPERMILPDZrm:
  case X86::VPERMILPDZrmk:
  case X86::VPERMILPDZrmkz: {
    if (!OutStreamer->isVerboseAsm())
      break;
    unsigned SrcIdx, MaskIdx;
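    // ElSize selects the mask decode width: 32 bits for the PS forms, 64
    // bits for the PD forms.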
    unsigned ElSize;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::VPERMILPSrm:
    case X86::VPERMILPSYrm:
    case X86::VPERMILPSZ128rm:
    case X86::VPERMILPSZ256rm:
    case X86::VPERMILPSZrm:
      SrcIdx = 1; MaskIdx = 5; ElSize = 32; break;
    case X86::VPERMILPSZ128rmkz:
    case X86::VPERMILPSZ256rmkz:
    case X86::VPERMILPSZrmkz:
      SrcIdx = 2; MaskIdx = 6; ElSize = 32; break;
    case X86::VPERMILPSZ128rmk:
    case X86::VPERMILPSZ256rmk:
    case X86::VPERMILPSZrmk:
      SrcIdx = 3; MaskIdx = 7; ElSize = 32; break;
    case X86::VPERMILPDrm:
    case X86::VPERMILPDYrm:
    case X86::VPERMILPDZ128rm:
    case X86::VPERMILPDZ256rm:
    case X86::VPERMILPDZrm:
      SrcIdx = 1; MaskIdx = 5; ElSize = 64; break;
    case X86::VPERMILPDZ128rmkz:
    case X86::VPERMILPDZ256rmkz:
    case X86::VPERMILPDZrmkz:
      SrcIdx = 2; MaskIdx = 6; ElSize = 64; break;
    case X86::VPERMILPDZ128rmk:
    case X86::VPERMILPDZ256rmk:
    case X86::VPERMILPDZrmk:
      SrcIdx = 3; MaskIdx = 7; ElSize = 64; break;
    }

    assert(MI->getNumOperands() >= 6 &&
           "We should always have at least 6 operands!");

    const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
      SmallVector<int, 16> Mask;
      DecodeVPERMILPMask(C, ElSize, Width, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
    }
    break;
  }

  case X86::VPERMIL2PDrm:
  case X86::VPERMIL2PSrm:
  case X86::VPERMIL2PDYrm:
  case X86::VPERMIL2PSYrm: {
    if (!OutStreamer->isVerboseAsm())
      break;
    assert(MI->getNumOperands() >= 8 &&
           "We should always have at least 8 operands!");

    const MachineOperand &CtrlOp = MI->getOperand(MI->getNumOperands() - 1);
    if (!CtrlOp.isImm())
      break;

    unsigned ElSize;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Invalid opcode");
    case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break;
    case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break;
    }

    const MachineOperand &MaskOp = MI->getOperand(6);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
      SmallVector<int, 16> Mask;
      DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
    }
    break;
  }

  case X86::VPPERMrrm: {
    if (!OutStreamer->isVerboseAsm())
      break;
    assert(MI->getNumOperands() >= 7 &&
           "We should always have at least 7 operands!");

    const MachineOperand &MaskOp = MI->getOperand(6);
    if (auto *C = getConstantFromPool(*MI, MaskOp)) {
      unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
      SmallVector<int, 16> Mask;
      DecodeVPPERMMask(C, Width, Mask);
      if (!Mask.empty())
        OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
    }
    break;
  }
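  // For an MMX load of a floating-point pool constant, print its raw bit
  // pattern, e.g. "mm0 = 0x3ff0000000000000" (register and value shown are
  // illustrative).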
  case X86::MMX_MOVQ64rm: {
    if (!OutStreamer->isVerboseAsm())
      break;
    if (MI->getNumOperands() <= 4)
      break;
    if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
      std::string Comment;
      raw_string_ostream CS(Comment);
      const MachineOperand &DstOp = MI->getOperand(0);
      CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
      if (auto *CF = dyn_cast<ConstantFP>(C)) {
        CS << "0x" << CF->getValueAPF().bitcastToAPInt().toString(16, false);
        OutStreamer->AddComment(CS.str());
      }
    }
    break;
  }

#define MOV_CASE(Prefix, Suffix)                                               \
  case X86::Prefix##MOVAPD##Suffix##rm:                                        \
  case X86::Prefix##MOVAPS##Suffix##rm:                                        \
  case X86::Prefix##MOVUPD##Suffix##rm:                                        \
  case X86::Prefix##MOVUPS##Suffix##rm:                                        \
  case X86::Prefix##MOVDQA##Suffix##rm:                                        \
  case X86::Prefix##MOVDQU##Suffix##rm:

#define MOV_AVX512_CASE(Suffix)                                                \
  case X86::VMOVDQA64##Suffix##rm:                                             \
  case X86::VMOVDQA32##Suffix##rm:                                             \
  case X86::VMOVDQU64##Suffix##rm:                                             \
  case X86::VMOVDQU32##Suffix##rm:                                             \
  case X86::VMOVDQU16##Suffix##rm:                                             \
  case X86::VMOVDQU8##Suffix##rm:                                              \
  case X86::VMOVAPS##Suffix##rm:                                               \
  case X86::VMOVAPD##Suffix##rm:                                               \
  case X86::VMOVUPS##Suffix##rm:                                               \
  case X86::VMOVUPD##Suffix##rm:

#define CASE_ALL_MOV_RM()                                                      \
  MOV_CASE(, )   /* SSE */                                                     \
  MOV_CASE(V, )  /* AVX-128 */                                                 \
  MOV_CASE(V, Y) /* AVX-256 */                                                 \
  MOV_AVX512_CASE(Z)                                                           \
  MOV_AVX512_CASE(Z256)                                                        \
  MOV_AVX512_CASE(Z128)

  // For loads from a constant pool to a vector register, print the constant
  // loaded.
  CASE_ALL_MOV_RM()
  case X86::VBROADCASTF128:
  case X86::VBROADCASTI128:
  case X86::VBROADCASTF32X4Z256rm:
  case X86::VBROADCASTF32X4rm:
  case X86::VBROADCASTF32X8rm:
  case X86::VBROADCASTF64X2Z128rm:
  case X86::VBROADCASTF64X2rm:
  case X86::VBROADCASTF64X4rm:
  case X86::VBROADCASTI32X4Z256rm:
  case X86::VBROADCASTI32X4rm:
  case X86::VBROADCASTI32X8rm:
  case X86::VBROADCASTI64X2Z128rm:
  case X86::VBROADCASTI64X2rm:
  case X86::VBROADCASTI64X4rm:
    if (!OutStreamer->isVerboseAsm())
      break;
    if (MI->getNumOperands() <= 4)
      break;
    if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
      int NumLanes = 1;
      // Override NumLanes for the broadcast instructions.
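      // Broadcasts repeat the pool constant across the destination register,
      // so the comment printed below repeats the lane contents NumLanes times
      // (e.g. a 128-bit constant broadcast to 256 bits is printed twice).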
      switch (MI->getOpcode()) {
      case X86::VBROADCASTF128:        NumLanes = 2; break;
      case X86::VBROADCASTI128:        NumLanes = 2; break;
      case X86::VBROADCASTF32X4Z256rm: NumLanes = 2; break;
      case X86::VBROADCASTF32X4rm:     NumLanes = 4; break;
      case X86::VBROADCASTF32X8rm:     NumLanes = 2; break;
      case X86::VBROADCASTF64X2Z128rm: NumLanes = 2; break;
      case X86::VBROADCASTF64X2rm:     NumLanes = 4; break;
      case X86::VBROADCASTF64X4rm:     NumLanes = 2; break;
      case X86::VBROADCASTI32X4Z256rm: NumLanes = 2; break;
      case X86::VBROADCASTI32X4rm:     NumLanes = 4; break;
      case X86::VBROADCASTI32X8rm:     NumLanes = 2; break;
      case X86::VBROADCASTI64X2Z128rm: NumLanes = 2; break;
      case X86::VBROADCASTI64X2rm:     NumLanes = 4; break;
      case X86::VBROADCASTI64X4rm:     NumLanes = 2; break;
      }

      std::string Comment;
      raw_string_ostream CS(Comment);
      const MachineOperand &DstOp = MI->getOperand(0);
      CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
      if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
        CS << "[";
        for (int l = 0; l != NumLanes; ++l) {
          for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements;
               ++i) {
            if (i != 0 || l != 0)
              CS << ",";
            if (CDS->getElementType()->isIntegerTy())
              printConstant(CDS->getElementAsAPInt(i), CS);
            else if (CDS->getElementType()->isHalfTy() ||
                     CDS->getElementType()->isFloatTy() ||
                     CDS->getElementType()->isDoubleTy())
              printConstant(CDS->getElementAsAPFloat(i), CS);
            else
              CS << "?";
          }
        }
        CS << "]";
        OutStreamer->AddComment(CS.str());
      } else if (auto *CV = dyn_cast<ConstantVector>(C)) {
        CS << "<";
        for (int l = 0; l != NumLanes; ++l) {
          for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands;
               ++i) {
            if (i != 0 || l != 0)
              CS << ",";
            printConstant(CV->getOperand(i), CS);
          }
        }
        CS << ">";
        OutStreamer->AddComment(CS.str());
      }
    }
    break;
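  // For broadcasts of a single scalar from the constant pool, print the
  // resulting splat, e.g. "xmm0 = [1,1,1,1]" (register and values shown are
  // illustrative).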
  case X86::MOVDDUPrm:
  case X86::VMOVDDUPrm:
  case X86::VMOVDDUPZ128rm:
  case X86::VBROADCASTSSrm:
  case X86::VBROADCASTSSYrm:
  case X86::VBROADCASTSSZ128m:
  case X86::VBROADCASTSSZ256m:
  case X86::VBROADCASTSSZm:
  case X86::VBROADCASTSDYrm:
  case X86::VBROADCASTSDZ256m:
  case X86::VBROADCASTSDZm:
  case X86::VPBROADCASTBrm:
  case X86::VPBROADCASTBYrm:
  case X86::VPBROADCASTBZ128m:
  case X86::VPBROADCASTBZ256m:
  case X86::VPBROADCASTBZm:
  case X86::VPBROADCASTDrm:
  case X86::VPBROADCASTDYrm:
  case X86::VPBROADCASTDZ128m:
  case X86::VPBROADCASTDZ256m:
  case X86::VPBROADCASTDZm:
  case X86::VPBROADCASTQrm:
  case X86::VPBROADCASTQYrm:
  case X86::VPBROADCASTQZ128m:
  case X86::VPBROADCASTQZ256m:
  case X86::VPBROADCASTQZm:
  case X86::VPBROADCASTWrm:
  case X86::VPBROADCASTWYrm:
  case X86::VPBROADCASTWZ128m:
  case X86::VPBROADCASTWZ256m:
  case X86::VPBROADCASTWZm:
    if (!OutStreamer->isVerboseAsm())
      break;
    if (MI->getNumOperands() <= 4)
      break;
    if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
      int NumElts;
      switch (MI->getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
      case X86::MOVDDUPrm:         NumElts = 2;  break;
      case X86::VMOVDDUPrm:        NumElts = 2;  break;
      case X86::VMOVDDUPZ128rm:    NumElts = 2;  break;
      case X86::VBROADCASTSSrm:    NumElts = 4;  break;
      case X86::VBROADCASTSSYrm:   NumElts = 8;  break;
      case X86::VBROADCASTSSZ128m: NumElts = 4;  break;
      case X86::VBROADCASTSSZ256m: NumElts = 8;  break;
      case X86::VBROADCASTSSZm:    NumElts = 16; break;
      case X86::VBROADCASTSDYrm:   NumElts = 4;  break;
      case X86::VBROADCASTSDZ256m: NumElts = 4;  break;
      case X86::VBROADCASTSDZm:    NumElts = 8;  break;
      case X86::VPBROADCASTBrm:    NumElts = 16; break;
      case X86::VPBROADCASTBYrm:   NumElts = 32; break;
      case X86::VPBROADCASTBZ128m: NumElts = 16; break;
      case X86::VPBROADCASTBZ256m: NumElts = 32; break;
      case X86::VPBROADCASTBZm:    NumElts = 64; break;
      case X86::VPBROADCASTDrm:    NumElts = 4;  break;
      case X86::VPBROADCASTDYrm:   NumElts = 8;  break;
      case X86::VPBROADCASTDZ128m: NumElts = 4;  break;
      case X86::VPBROADCASTDZ256m: NumElts = 8;  break;
      case X86::VPBROADCASTDZm:    NumElts = 16; break;
      case X86::VPBROADCASTQrm:    NumElts = 2;  break;
      case X86::VPBROADCASTQYrm:   NumElts = 4;  break;
      case X86::VPBROADCASTQZ128m: NumElts = 2;  break;
      case X86::VPBROADCASTQZ256m: NumElts = 4;  break;
      case X86::VPBROADCASTQZm:    NumElts = 8;  break;
      case X86::VPBROADCASTWrm:    NumElts = 8;  break;
      case X86::VPBROADCASTWYrm:   NumElts = 16; break;
      case X86::VPBROADCASTWZ128m: NumElts = 8;  break;
      case X86::VPBROADCASTWZ256m: NumElts = 16; break;
      case X86::VPBROADCASTWZm:    NumElts = 32; break;
      }

      std::string Comment;
      raw_string_ostream CS(Comment);
      const MachineOperand &DstOp = MI->getOperand(0);
      CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
      CS << "[";
      for (int i = 0; i != NumElts; ++i) {
        if (i != 0)
          CS << ",";
        printConstant(C, CS);
      }
      CS << "]";
      OutStreamer->AddComment(CS.str());
    }
  }

  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);

  // Stackmap shadows cannot include branch targets, so we can count the bytes
  // in a call towards the shadow, but must ensure that no thread returns into
  // the stackmap shadow. The only way to achieve this is if the call is at
  // the end of the shadow.
  if (MI->isCall()) {
    // Count the size of the call towards the shadow.
    SMShadowTracker.count(TmpInst, getSubtargetInfo(), CodeEmitter.get());
    // Then flush the shadow so that we fill with nops before the call, not
    // after it.
    SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());
    // Then emit the call.
    OutStreamer->EmitInstruction(TmpInst, getSubtargetInfo());
    return;
  }

  EmitAndCountInstruction(TmpInst);
}