//===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <optional>

using namespace llvm;

namespace {

class AMDGPUMCCodeEmitter : public MCCodeEmitter {
  const MCRegisterInfo &MRI;
  const MCInstrInfo &MCII;

public:
  AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
      : MRI(MRI), MCII(MCII) {}

  /// Encode the instruction and write it to \p CB.
  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// Encode an fp or int literal.
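  /// Returns the inline constant encoding for \p MO, 255 if the value must
  /// instead be emitted as a separate literal, or std::nullopt if the operand
  /// is neither an immediate nor an expression.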
  std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const;

  void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                             APInt &Inst, APInt &Scratch,
                             const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII,
                                               MCContext &Ctx) {
  return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}

static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  return IntImm == 0 ? 255 : IntImm;
}

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint32_t>(0.5f))
    return 240;

  if (Val == llvm::bit_cast<uint32_t>(-0.5f))
    return 241;

  if (Val == llvm::bit_cast<uint32_t>(1.0f))
    return 242;

  if (Val == llvm::bit_cast<uint32_t>(-1.0f))
    return 243;

  if (Val == llvm::bit_cast<uint32_t>(2.0f))
    return 244;

  if (Val == llvm::bit_cast<uint32_t>(-2.0f))
    return 245;

  if (Val == llvm::bit_cast<uint32_t>(4.0f))
    return 246;

  if (Val == llvm::bit_cast<uint32_t>(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint64_t>(0.5))
    return 240;

  if (Val == llvm::bit_cast<uint64_t>(-0.5))
    return 241;

  if (Val == llvm::bit_cast<uint64_t>(1.0))
    return 242;

  if (Val == llvm::bit_cast<uint64_t>(-1.0))
    return 243;

  if (Val == llvm::bit_cast<uint64_t>(2.0))
    return 244;

  if (Val == llvm::bit_cast<uint64_t>(-2.0))
    return 245;

  if (Val == llvm::bit_cast<uint64_t>(4.0))
    return 246;

  if (Val == llvm::bit_cast<uint64_t>(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

std::optional<uint32_t>
AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                    const MCOperandInfo &OpInfo,
                                    const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {
    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return {};

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
  case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return AMDGPU::getInlineEncodingV2I16(static_cast<uint32_t>(Imm))
        .value_or(255);
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
    return AMDGPU::getInlineEncodingV2F16(static_cast<uint32_t>(Imm))
        .value_or(255);
  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}

uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

  if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
    if (AMDGPU::hasNamedOperand(Opcode, src2))
      return 0;
    if (AMDGPU::hasNamedOperand(Opcode, src1))
      return OP_SEL_HI_2;
    if (AMDGPU::hasNamedOperand(Opcode, src0))
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }
  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}

static bool isVCMPX64(const MCInstrDesc &Desc) {
  return (Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}

void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  // GFX10+ v_cmpx opcodes promoted to VOP3 have an implied dst of EXEC.
  // The documentation requires dst to be encoded as EXEC (0x7E), but it
  // looks like the actual value encoded for the dst operand is ignored by
  // the hardware. It was decided to define dst as "do not care" in the td
  // files so the disassembler accepts any dst value. However, dst is
  // encoded as EXEC here for compatibility with SP3.
  if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
    assert((Encoding & 0xFF) == 0);
    Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
                AMDGPU::HWEncoding::REG_IDX_MASK;
  }

  for (unsigned i = 0; i < bytes; i++) {
    CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }

  // NSA encoding.
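  // GFX10+ MIMG instructions can use the non-sequential address (NSA) form:
  // each address VGPR beyond vaddr0 is emitted as one extra byte after the
  // base instruction, zero-padded up to the next dword boundary.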
  if (AMDGPU::isGFX10Plus(STI) && (Desc.TSFlags & SIInstrFlags::MIMG)) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      CB.push_back((uint8_t)Encoding.getLimitedValue());
    }
    CB.append(NumPadding, 0);
  }

  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;

  // Do not emit literals from SISrc operands for instructions with a
  // mandatory literal operand.
  if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
    return;

  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
    if (!Enc || *Enc != 255)
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");

    if (Desc.operands()[i].OperandType == AMDGPU::OPERAND_REG_IMM_FP64)
      Imm = Hi_32(Imm);

    support::endian::write<uint32_t>(CB, Imm, llvm::endianness::little);

    // Only one literal value allowed.
    break;
  }
}

void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}

void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
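  // Other subtargets accept wider (and signed) offsets, so only VI is
  // checked here; the immediate is passed through unchanged in either case.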
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
    if (Enc && *Enc != 255) {
      Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}

void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}

void AMDGPUMCCodeEmitter::getAVOperandEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
  bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
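  // The IS_VGPR_OR_AGPR flag cannot distinguish the two, so check AGPR
  // register class membership explicitly to decide the virtual 9th bit.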
  bool IsAGPR = false;
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_288RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_320RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_352RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_384RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    IsAGPR = true;

  Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
}

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO, APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  llvm_unreachable("TODO: Implement getMachineOpValueT16().");
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
    unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI;
    bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
    Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PCRel should not depend on what the
    // expression looks like. Given that this is just a general expression,
    // it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16
    //
    // And expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //   s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
      Op = *Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

#include "AMDGPUGenMCCodeEmitter.inc"