//===- AMDGPUMCInstLower.cpp - Lower AMDGPU MachineInstr to an MCInst -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Code to lower AMDGPU MachineInstrs to their corresponding MCInst.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUMCInstLower.h"
#include "AMDGPUAsmPrinter.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include <algorithm>

using namespace llvm;

#include "AMDGPUGenMCPseudoLowering.inc"

AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx,
                                     const TargetSubtargetInfo &st,
                                     const AsmPrinter &ap)
    : Ctx(ctx), ST(st), AP(ap) {}

static MCSymbolRefExpr::VariantKind getVariantKind(unsigned MOFlags) {
  switch (MOFlags) {
  default:
    return MCSymbolRefExpr::VK_None;
  case SIInstrInfo::MO_GOTPCREL:
    return MCSymbolRefExpr::VK_GOTPCREL;
  case SIInstrInfo::MO_GOTPCREL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_LO;
  case SIInstrInfo::MO_GOTPCREL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_GOTPCREL32_HI;
  case SIInstrInfo::MO_REL32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_LO;
  case SIInstrInfo::MO_REL32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_REL32_HI;
  case SIInstrInfo::MO_ABS32_LO:
    return MCSymbolRefExpr::VK_AMDGPU_ABS32_LO;
  case SIInstrInfo::MO_ABS32_HI:
    return MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
}
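/// Lower a single MachineOperand \p MO into \p MCOp, returning true on
/// success. Basic blocks become symbol references to their labels; global
/// addresses become symbol expressions (plus a constant offset when one is
/// present) with the relocation variant chosen by getVariantKind() above.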
bool AMDGPUMCInstLower::lowerOperand(const MachineOperand &MO,
                                     MCOperand &MCOp) const {
  switch (MO.getType()) {
  default:
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    return true;
  case MachineOperand::MO_Register:
    MCOp = MCOperand::createReg(AMDGPU::getMCReg(MO.getReg(), ST));
    return true;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::createExpr(
        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), Ctx));
    return true;
  case MachineOperand::MO_GlobalAddress: {
    const GlobalValue *GV = MO.getGlobal();
    SmallString<128> SymbolName;
    AP.getNameWithPrefix(SymbolName, GV);
    MCSymbol *Sym = Ctx.getOrCreateSymbol(SymbolName);
    const MCExpr *Expr =
        MCSymbolRefExpr::create(Sym, getVariantKind(MO.getTargetFlags()), Ctx);
    int64_t Offset = MO.getOffset();
    if (Offset != 0) {
      Expr = MCBinaryExpr::createAdd(Expr,
                                     MCConstantExpr::create(Offset, Ctx), Ctx);
    }
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_ExternalSymbol: {
    MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName()));
    Sym->setExternal(true);
    const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
    MCOp = MCOperand::createExpr(Expr);
    return true;
  }
  case MachineOperand::MO_RegisterMask:
    // Regmasks are like implicit defs.
    return false;
  case MachineOperand::MO_MCSymbol:
    if (MO.getTargetFlags() == SIInstrInfo::MO_FAR_BRANCH_OFFSET) {
      MCSymbol *Sym = MO.getMCSymbol();
      MCOp = MCOperand::createExpr(Sym->getVariableValue());
      return true;
    }
    break;
  }
  llvm_unreachable("unknown operand type");
}

void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
  unsigned Opcode = MI->getOpcode();
  const auto *TII = static_cast<const SIInstrInfo *>(ST.getInstrInfo());

  // FIXME: Should be able to handle this with emitPseudoExpansionLowering. We
  // need to select it to the subtarget specific version, and there's no way to
  // do that with a single pseudo source operation.
  if (Opcode == AMDGPU::S_SETPC_B64_return ||
      Opcode == AMDGPU::S_SETPC_B64_return_gfx)
    Opcode = AMDGPU::S_SETPC_B64;
  else if (Opcode == AMDGPU::SI_CALL) {
    // SI_CALL is just S_SWAPPC_B64 with an additional operand to track the
    // called function (which we need to remove here).
    OutMI.setOpcode(TII->pseudoToMCOpcode(AMDGPU::S_SWAPPC_B64));
    MCOperand Dest, Src;
    lowerOperand(MI->getOperand(0), Dest);
    lowerOperand(MI->getOperand(1), Src);
    OutMI.addOperand(Dest);
    OutMI.addOperand(Src);
    return;
  } else if (Opcode == AMDGPU::SI_TCRETURN) {
    // TODO: How to use branch immediate and avoid register+add?
    Opcode = AMDGPU::S_SETPC_B64;
  }

  int MCOpcode = TII->pseudoToMCOpcode(Opcode);
  if (MCOpcode == -1) {
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
                "a target-specific version: " + Twine(MI->getOpcode()));
  }

  OutMI.setOpcode(MCOpcode);

  for (const MachineOperand &MO : MI->explicit_operands()) {
    MCOperand MCOp;
    lowerOperand(MO, MCOp);
    OutMI.addOperand(MCOp);
  }

  // If the MC instruction has a trailing 'fi' (DPP "fetch inactive") operand
  // that the pseudo did not carry, pad it with a default value of zero.
  int FIIdx = AMDGPU::getNamedOperandIdx(MCOpcode, AMDGPU::OpName::fi);
  if (FIIdx >= (int)OutMI.getNumOperands())
    OutMI.addOperand(MCOperand::createImm(0));
}

bool AMDGPUAsmPrinter::lowerOperand(const MachineOperand &MO,
                                    MCOperand &MCOp) const {
  const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);
  return MCInstLowering.lowerOperand(MO, MCOp);
}

const MCExpr *AMDGPUAsmPrinter::lowerConstant(const Constant *CV) {
  if (const MCExpr *E = lowerAddrSpaceCast(TM, CV, OutContext))
    return E;
  return AsmPrinter::lowerConstant(CV);
}
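/// Emit \p MI through the streamer. Bundles are flattened by recursively
/// emitting each bundled instruction; a few placeholder pseudos that have no
/// machine encoding are emitted only as assembly comments in verbose mode.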
void AMDGPUAsmPrinter::emitInstruction(const MachineInstr *MI) {
  if (emitPseudoExpansionLowering(*OutStreamer, MI))
    return;

  const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
  AMDGPUMCInstLower MCInstLowering(OutContext, STI, *this);

  StringRef Err;
  if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
    LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
    C.emitError("Illegal instruction detected: " + Err);
    MI->print(errs());
  }

  if (MI->isBundle()) {
    const MachineBasicBlock *MBB = MI->getParent();
    MachineBasicBlock::const_instr_iterator I = ++MI->getIterator();
    while (I != MBB->instr_end() && I->isInsideBundle()) {
      emitInstruction(&*I);
      ++I;
    }
  } else {
    // We don't want these pseudo instructions encoded. They are
    // placeholder terminator instructions and should only be printed as
    // comments.
    if (MI->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) {
      if (isVerbose())
        OutStreamer->emitRawComment(" return to shader part epilog");
      return;
    }

    if (MI->getOpcode() == AMDGPU::WAVE_BARRIER) {
      if (isVerbose())
        OutStreamer->emitRawComment(" wave barrier");
      return;
    }

    if (MI->getOpcode() == AMDGPU::SI_MASKED_UNREACHABLE) {
      if (isVerbose())
        OutStreamer->emitRawComment(" divergent unreachable");
      return;
    }

    if (MI->isMetaInstruction()) {
      if (isVerbose())
        OutStreamer->emitRawComment(" meta instruction");
      return;
    }

    MCInst TmpInst;
    MCInstLowering.lower(MI, TmpInst);
    EmitToStreamer(*OutStreamer, TmpInst);

#ifdef EXPENSIVE_CHECKS
    // Check getInstSizeInBytes on explicitly specified CPUs (it cannot
    // work correctly for the generic CPU).
    //
    // The isPseudo check really shouldn't be here, but unfortunately there
    // are some negative lit tests that depend on being able to continue
    // through here even when pseudo instructions haven't been lowered.
    //
    // We also overestimate branch sizes with the offset bug.
    if (!MI->isPseudo() && STI.isCPUStringValid(STI.getCPU()) &&
        (!STI.hasOffset3fBug() || !MI->isBranch())) {
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      std::unique_ptr<MCCodeEmitter> InstEmitter(createSIMCCodeEmitter(
          *STI.getInstrInfo(), *OutContext.getRegisterInfo(), OutContext));
      InstEmitter->encodeInstruction(TmpInst, CodeStream, Fixups, STI);

      assert(CodeBytes.size() == STI.getInstrInfo()->getInstSizeInBytes(*MI));
    }
#endif

    if (DumpCodeInstEmitter) {
      // Disassemble instruction/operands to text.
      DisasmLines.resize(DisasmLines.size() + 1);
      std::string &DisasmLine = DisasmLines.back();
      raw_string_ostream DisasmStream(DisasmLine);

      AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(), *STI.getInstrInfo(),
                                    *STI.getRegisterInfo());
      InstPrinter.printInst(&TmpInst, 0, StringRef(), STI, DisasmStream);

      // Disassemble instruction/operands to hex representation.
      SmallVector<MCFixup, 4> Fixups;
      SmallVector<char, 16> CodeBytes;
      raw_svector_ostream CodeStream(CodeBytes);

      DumpCodeInstEmitter->encodeInstruction(
          TmpInst, CodeStream, Fixups, MF->getSubtarget<MCSubtargetInfo>());
      HexLines.resize(HexLines.size() + 1);
      std::string &HexLine = HexLines.back();
      raw_string_ostream HexStream(HexLine);

      // Print the encoding as space-separated 32-bit words.
      for (size_t i = 0; i < CodeBytes.size(); i += 4) {
        unsigned int CodeDWord = *(unsigned int *)&CodeBytes[i];
        HexStream << format("%s%08X", (i > 0 ? " " : ""), CodeDWord);
      }

      DisasmStream.flush();
      DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLine.size());
    }
  }
}