/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

SIRegisterInfo.cpp
     90: Register TmpVGPR = AMDGPU::NoRegister;
     96: Register SavedExecReg = AMDGPU::NoRegister;
    130: ExecReg = AMDGPU::EXEC_LO;  // in SGPRSpillBuilder()
    131: MovOpc = AMDGPU::S_MOV_B32;  // in SGPRSpillBuilder()
    132: NotOpc = AMDGPU::S_NOT_B32;  // in SGPRSpillBuilder()
    134: ExecReg = AMDGPU::EXEC;  // in SGPRSpillBuilder()
    135: MovOpc = AMDGPU::S_MOV_B64;  // in SGPRSpillBuilder()
    136: NotOpc = AMDGPU::S_NOT_B64;  // in SGPRSpillBuilder()
    139: assert(SuperReg != AMDGPU::M0 && "m0 should never spill");  // in SGPRSpillBuilder()
    140: assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&  // in SGPRSpillBuilder()
    [all …]
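
The SGPRSpillBuilder hits above show a recurring backend pattern: the spill helper picks the 32-bit exec register and scalar opcodes on wave32 subtargets and the 64-bit forms on wave64. A minimal, self-contained sketch of that selection — the enums are hypothetical stand-ins, not LLVM's real definitions:

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-ins for the AMDGPU register/opcode enums.
    enum Reg    { EXEC_LO, EXEC, M0 };
    enum Opcode { S_MOV_B32, S_NOT_B32, S_MOV_B64, S_NOT_B64 };

    struct SGPRSpillBuilder {
      Reg ExecReg;
      Opcode MovOpc, NotOpc;

      explicit SGPRSpillBuilder(bool IsWave32) {
        // Wave32 targets manipulate only the low half of EXEC with
        // 32-bit scalar ops; wave64 targets need the full 64-bit pair.
        if (IsWave32) {
          ExecReg = EXEC_LO;
          MovOpc  = S_MOV_B32;
          NotOpc  = S_NOT_B32;
        } else {
          ExecReg = EXEC;
          MovOpc  = S_MOV_B64;
          NotOpc  = S_NOT_B64;
        }
      }
    };

    int main() {
      SGPRSpillBuilder B(/*IsWave32=*/true);
      assert(B.ExecReg == EXEC_LO && B.MovOpc == S_MOV_B32);
      std::printf("wave32 spill builder uses 32-bit exec ops\n");
    }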

SILoadStoreOptimizer.cpp
    167: AddrOp->getReg() != AMDGPU::SGPR_NULL)  // in hasMergeableAddress()
    331: return AMDGPU::getMUBUFElements(Opc);  // in getOpcodeWidth()
    335: TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm();  // in getOpcodeWidth()
    339: return AMDGPU::getMTBUFElements(Opc);  // in getOpcodeWidth()
    343: case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:  // in getOpcodeWidth()
    344: case AMDGPU::S_BUFFER_LOAD_DWORD_SGPR_IMM:  // in getOpcodeWidth()
    345: case AMDGPU::S_LOAD_DWORD_IMM:  // in getOpcodeWidth()
    346: case AMDGPU::GLOBAL_LOAD_DWORD:  // in getOpcodeWidth()
    347: case AMDGPU::GLOBAL_LOAD_DWORD_SADDR:  // in getOpcodeWidth()
    348: case AMDGPU::GLOBAL_STORE_DWORD:  // in getOpcodeWidth()
    [all …]

SIInstrInfo.cpp
     43: namespace llvm::AMDGPU {  // namespace
     64: : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),  // in SIInstrInfo()
     86: int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);  // in nodesHaveSameOperandValue()
     87: int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);  // in nodesHaveSameOperandValue()
    159: case AMDGPU::S_AND_SAVEEXEC_B32:  // in resultDependsOnExec()
    160: case AMDGPU::S_AND_SAVEEXEC_B64:  // in resultDependsOnExec()
    162: case AMDGPU::S_AND_B32:  // in resultDependsOnExec()
    163: case AMDGPU::S_AND_B64:  // in resultDependsOnExec()
    164: if (!Use.readsRegister(AMDGPU::EXEC, /*TRI=*/nullptr))  // in resultDependsOnExec()
    177: case AMDGPU::V_READFIRSTLANE_B32:  // in resultDependsOnExec()
    [all …]
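
The nodesHaveSameOperandValue() hits illustrate the named-operand lookup idiom: AMDGPU::getNamedOperandIdx maps an operand name to its index for a given opcode, returning -1 when the opcode has no such operand. A rough sketch of the lookup-and-compare shape, with a hypothetical table standing in for the TableGen-generated one:

    #include <array>
    #include <cstdio>

    // Hypothetical: index of "src0" per opcode; -1 means the opcode has
    // no such named operand (mirrors getNamedOperandIdx's -1 contract).
    constexpr std::array<int, 3> Src0IdxByOpcode = {1, 2, -1};

    bool haveComparableSrc0(unsigned Opc0, unsigned Opc1) {
      int Op0Idx = Src0IdxByOpcode[Opc0];
      int Op1Idx = Src0IdxByOpcode[Opc1];
      // Either both opcodes carry the operand, or neither does;
      // otherwise the values cannot be meaningfully compared.
      if ((Op0Idx == -1) != (Op1Idx == -1))
        return false;
      return Op0Idx != -1; // both present: caller compares the values
    }

    int main() {
      std::printf("%d\n", haveComparableSrc0(0, 1)); // 1: both have src0
      std::printf("%d\n", haveComparableSrc0(0, 2)); // 0: only one has it
    }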

AMDGPUCombinerHelper.cpp
     23: case AMDGPU::G_FADD:  // in fnegFoldsIntoMI()
     24: case AMDGPU::G_FSUB:  // in fnegFoldsIntoMI()
     25: case AMDGPU::G_FMUL:  // in fnegFoldsIntoMI()
     26: case AMDGPU::G_FMA:  // in fnegFoldsIntoMI()
     27: case AMDGPU::G_FMAD:  // in fnegFoldsIntoMI()
     28: case AMDGPU::G_FMINNUM:  // in fnegFoldsIntoMI()
     29: case AMDGPU::G_FMAXNUM:  // in fnegFoldsIntoMI()
     30: case AMDGPU::G_FMINNUM_IEEE:  // in fnegFoldsIntoMI()
     31: case AMDGPU::G_FMAXNUM_IEEE:  // in fnegFoldsIntoMI()
     32: case AMDGPU::G_FMINIMUM:  // in fnegFoldsIntoMI()
    [all …]
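
fnegFoldsIntoMI() is a whitelist switch: it answers whether an fneg can be absorbed into an instruction as a source modifier. A compact, compilable sketch of that shape — the opcode enum is a hypothetical stand-in for LLVM's generic MIR opcodes:

    #include <cstdio>

    // Hypothetical stand-ins for the generic MIR opcodes listed above.
    enum GOpcode { G_FADD, G_FSUB, G_FMUL, G_FMA, G_FMAD,
                   G_FMINNUM, G_FMAXNUM, G_LOAD };

    // Whitelist switch: can an fneg be folded into this instruction
    // as a source modifier?
    bool fnegFoldsIntoOpcode(GOpcode Opc) {
      switch (Opc) {
      case G_FADD:
      case G_FSUB:
      case G_FMUL:
      case G_FMA:
      case G_FMAD:
      case G_FMINNUM:
      case G_FMAXNUM:
        return true;
      default:
        return false;
      }
    }

    int main() {
      std::printf("fneg folds into G_FMA: %d\n", fnegFoldsIntoOpcode(G_FMA));
      std::printf("fneg folds into G_LOAD: %d\n", fnegFoldsIntoOpcode(G_LOAD));
    }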

AMDGPURegisterBankInfo.cpp
    124: if (Opc == AMDGPU::G_ANYEXT || Opc == AMDGPU::G_ZEXT ||  // in applyBank()
    125: Opc == AMDGPU::G_SEXT) {  // in applyBank()
    132: if (SrcBank == &AMDGPU::VCCRegBank) {  // in applyBank()
    136: assert(NewBank == &AMDGPU::VGPRRegBank);  // in applyBank()
    142: auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1);  // in applyBank()
    156: if (Opc == AMDGPU::G_TRUNC) {  // in applyBank()
    159: assert(DstBank != &AMDGPU::VCCRegBank);  // in applyBank()
    174: assert(NewBank == &AMDGPU::VGPRRegBank &&  // in applyBank()
    176: assert((MI.getOpcode() != AMDGPU::G_TRUNC &&  // in applyBank()
    177: MI.getOpcode() != AMDGPU::G_ANYEXT) &&  // in applyBank()
    [all …]

AMDGPUResourceUsageAnalysis.cpp
     39: using namespace llvm::AMDGPU;
     90: return AMDGPU::getTotalNumVGPRs(ST.hasGFX90AInsts(), ArgNumAGPR, ArgNumVGPR);  // in getTotalNumVGPRs()
    116: if (AMDGPU::getAMDHSACodeObjectVersion(M) >= AMDGPU::AMDHSA_COV5 ||  // in runOnModule()
    182: Info.UsesFlatScratch = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) ||  // in analyzeResourceUsage()
    183: MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI) ||  // in analyzeResourceUsage()
    194: (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) &&  // in analyzeResourceUsage()
    195: !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) &&  // in analyzeResourceUsage()
    196: !hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) {  // in analyzeResourceUsage()
    211: MRI.isPhysRegUsed(AMDGPU::VCC_LO) || MRI.isPhysRegUsed(AMDGPU::VCC_HI);  // in analyzeResourceUsage()
    217: MCPhysReg HighestVGPRReg = AMDGPU::NoRegister;  // in analyzeResourceUsage()
    [all …]

SIOptimizeExecMasking.cpp
     98: case AMDGPU::COPY:  // in isCopyFromExec()
     99: case AMDGPU::S_MOV_B64:  // in isCopyFromExec()
    100: case AMDGPU::S_MOV_B64_term:  // in isCopyFromExec()
    101: case AMDGPU::S_MOV_B32:  // in isCopyFromExec()
    102: case AMDGPU::S_MOV_B32_term: {  // in isCopyFromExec()
    109: return AMDGPU::NoRegister;  // in isCopyFromExec()
    115: case AMDGPU::COPY:  // in isCopyToExec()
    116: case AMDGPU::S_MOV_B64:  // in isCopyToExec()
    117: case AMDGPU::S_MOV_B32: {  // in isCopyToExec()
    123: case AMDGPU::S_MOV_B64_term:  // in isCopyToExec()
    [all …]

GCNDPPCombine.cpp
    128: if (AMDGPU::isTrue16Inst(Op))  // in isShrinkable()
    130: if (const auto *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) {  // in isShrinkable()
    140: if (!hasNoImmOrEqual(MI, AMDGPU::OpName::src0_modifiers, 0, Mask) ||  // in isShrinkable()
    141: !hasNoImmOrEqual(MI, AMDGPU::OpName::src1_modifiers, 0, Mask) ||  // in isShrinkable()
    142: !hasNoImmOrEqual(MI, AMDGPU::OpName::clamp, 0) ||  // in isShrinkable()
    143: !hasNoImmOrEqual(MI, AMDGPU::OpName::omod, 0) ||  // in isShrinkable()
    144: !hasNoImmOrEqual(MI, AMDGPU::OpName::byte_sel, 0)) {  // in isShrinkable()
    152: int DPP32 = AMDGPU::getDPPOp32(Op);  // in getDPPOp()
    155: int E32 = AMDGPU::getVOPe32(Op);  // in getDPPOp()
    156: DPP32 = (E32 == -1) ? -1 : AMDGPU::getDPPOp32(E32);  // in getDPPOp()
    [all …]
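
The getDPPOp() hits show a two-step opcode mapping with a fallback: try the direct DPP form first, and if none exists, shrink to the VOP 32-bit encoding and map that instead, with -1 signalling "no encoding" throughout. A self-contained sketch under those assumptions — the two lookup functions here are invented for illustration:

    #include <cstdio>

    // Hypothetical lookup tables; -1 means "no such encoding" (the real
    // getDPPOp32/getVOPe32 helpers use -1 the same way).
    int getDPPOp32(int Op) { return Op == 10 ? 20 : -1; }
    int getVOPe32(int Op)  { return Op == 11 ? 10 : -1; }

    // Prefer the direct DPP form; otherwise shrink to the VOP 32-bit
    // encoding and retry the DPP mapping on that.
    int getDPPOp(int Op) {
      int DPP32 = getDPPOp32(Op);
      if (DPP32 == -1) {
        int E32 = getVOPe32(Op);
        DPP32 = (E32 == -1) ? -1 : getDPPOp32(E32);
      }
      return DPP32;
    }

    int main() {
      std::printf("%d\n", getDPPOp(10)); // 20: direct DPP form exists
      std::printf("%d\n", getDPPOp(11)); // 20: found via the e32 fallback
      std::printf("%d\n", getDPPOp(12)); // -1: no DPP form at all
    }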

SIPeepholeSDWA.cpp
    116: using namespace AMDGPU::SDWA;
    316: if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {  // in getSrcMods()
    317: if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {  // in getSrcMods()
    320: } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {  // in getSrcMods()
    321: if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {  // in getSrcMods()
    375: case AMDGPU::V_CVT_F32_FP8_sdwa:  // in convertToSDWA()
    376: case AMDGPU::V_CVT_F32_BF8_sdwa:  // in convertToSDWA()
    377: case AMDGPU::V_CVT_PK_F32_FP8_sdwa:  // in convertToSDWA()
    378: case AMDGPU::V_CVT_PK_F32_BF8_sdwa:  // in convertToSDWA()
    386: MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  // in convertToSDWA()
    [all …]

AMDGPUSubtarget.cpp
    108: if (!hasFeature(AMDGPU::FeatureWavefrontSize32) &&  // in initializeSubtargetDependencies()
    109: !hasFeature(AMDGPU::FeatureWavefrontSize64)) {  // in initializeSubtargetDependencies()
    113: ToggleFeature(AMDGPU::FeatureWavefrontSize32);  // in initializeSubtargetDependencies()
    128: ToggleFeature(AMDGPU::FeatureFlatForGlobal);  // in initializeSubtargetDependencies()
    134: ToggleFeature(AMDGPU::FeatureFlatForGlobal);  // in initializeSubtargetDependencies()
    156: if (AMDGPU::isGFX10Plus(*this) &&  // in initializeSubtargetDependencies()
    157: !getFeatureBits().test(AMDGPU::FeatureCuMode))  // in initializeSubtargetDependencies()
    179: if (hasFeature(AMDGPU::FeatureWavefrontSize32) ==  // in checkSubtargetFeatures()
    180: hasFeature(AMDGPU::FeatureWavefrontSize64)) {  // in checkSubtargetFeatures()
    204: MaxWavesPerEU = AMDGPU::IsaInfo::getMaxWavesPerEU(this);  // in GCNSubtarget()
    [all …]

AMDGPUInstructionSelector.cpp
     72: return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS  // in getWaveAddress()
     91: return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&  // in isVCC()
     96: return RB->getID() == AMDGPU::VCCRegBankID;  // in isVCC()
    103: MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));  // in constrainCopyLikeIntrin()
    134: if (SrcReg == AMDGPU::SCC) {  // in selectCOPY()
    154: STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;  // in selectCOPY()
    166: IsSGPR ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;  // in selectCOPY()
    173: BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)  // in selectCOPY()
    254: BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)  // in getSubOperand64()
    270: case AMDGPU::sub0:  // in getSubOperand64()
    [all …]

SIFoldOperands.cpp
    160: case AMDGPU::V_MAC_F32_e64:  // in macToMad()
    161: return AMDGPU::V_MAD_F32_e64;  // in macToMad()
    162: case AMDGPU::V_MAC_F16_e64:  // in macToMad()
    163: return AMDGPU::V_MAD_F16_e64;  // in macToMad()
    164: case AMDGPU::V_FMAC_F32_e64:  // in macToMad()
    165: return AMDGPU::V_FMA_F32_e64;  // in macToMad()
    166: case AMDGPU::V_FMAC_F16_e64:  // in macToMad()
    167: return AMDGPU::V_FMA_F16_gfx9_e64;  // in macToMad()
    168: case AMDGPU::V_FMAC_F16_t16_e64:  // in macToMad()
    169: return AMDGPU::V_FMA_F16_gfx9_e64;  // in macToMad()
    [all …]
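
macToMad() is a pure opcode-to-opcode mapping: each two-address MAC/FMAC form is rewritten to its three-address MAD/FMA equivalent, which frees an operand slot for immediate folding. A compilable sketch of the same shape with hypothetical opcode names; NO_MAD_FORM is an invented sentinel, not LLVM's actual "no match" value:

    #include <cstdio>

    // Hypothetical stand-ins for the MAC/MAD opcode pairs listed above.
    enum Opcode { V_MAC_F32_e64, V_MAD_F32_e64,
                  V_FMAC_F32_e64, V_FMA_F32_e64,
                  V_ADD_F32_e64, NO_MAD_FORM };

    // Map a two-address MAC/FMAC to its three-address MAD/FMA form.
    Opcode macToMad(Opcode Opc) {
      switch (Opc) {
      case V_MAC_F32_e64:  return V_MAD_F32_e64;
      case V_FMAC_F32_e64: return V_FMA_F32_e64;
      default:             return NO_MAD_FORM; // no MAD equivalent
      }
    }

    int main() {
      std::printf("%d\n", macToMad(V_MAC_F32_e64) == V_MAD_F32_e64);   // 1
      std::printf("%d\n", macToMad(V_ADD_F32_e64) == NO_MAD_FORM);     // 1
    }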

SIShrinkInstructions.cpp
     93: int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);  // in foldImmediates()
    152: if (AMDGPU::VGPR_32RegClass.contains(Reg) &&  // in shouldShrinkTrue16()
    153: !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg))  // in shouldShrinkTrue16()
    207: return AMDGPU::V_NOT_B32_e32;  // in canModifyToInlineImmOp32()
    212: return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;  // in canModifyToInlineImmOp32()
    251: int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());  // in shrinkScalarCompare()
    257: if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {  // in shrinkScalarCompare()
    261: SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?  // in shrinkScalarCompare()
    262: AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;  // in shrinkScalarCompare()
    284: const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());  // in shrinkMIMG()
    [all …]
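
The shrinkScalarCompare() hits suggest the SOPK shrink logic: equality/inequality compares are sign-agnostic, so an unsigned compare whose immediate only fits as a signed 16-bit value can switch to the signed SOPK variant. A rough, self-contained sketch of that immediate-driven choice — the opcode names mirror the listing, but the surrounding policy is an assumption:

    #include <cstdint>
    #include <cstdio>

    enum Opcode { S_CMPK_EQ_U32, S_CMPK_LG_U32,
                  S_CMPK_EQ_I32, S_CMPK_LG_I32, NO_SOPK };

    bool isInt16(int64_t V)  { return V >= INT16_MIN && V <= INT16_MAX; }
    bool isUInt16(int64_t V) { return V >= 0 && V <= UINT16_MAX; }

    // EQ/LG compares give the same answer regardless of signedness, so
    // an immediate that only fits signed can use the signed variant.
    Opcode pickSOPK(Opcode SOPKOpc, int64_t Imm) {
      if (SOPKOpc == S_CMPK_EQ_U32 || SOPKOpc == S_CMPK_LG_U32) {
        if (isUInt16(Imm))
          return SOPKOpc;                   // already encodable
        if (isInt16(Imm))
          return SOPKOpc == S_CMPK_EQ_U32 ? S_CMPK_EQ_I32 : S_CMPK_LG_I32;
        return NO_SOPK;                     // immediate does not fit
      }
      return SOPKOpc;
    }

    int main() {
      std::printf("%d\n", pickSOPK(S_CMPK_EQ_U32, -5) == S_CMPK_EQ_I32);
      std::printf("%d\n", pickSOPK(S_CMPK_EQ_U32, 70000) == NO_SOPK);
    }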

GCNHazardRecognizer.cpp
     63: MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 19 : 5;  // in GCNHazardRecognizer()
     81: return Opcode == AMDGPU::V_DIV_FMAS_F32_e64 || Opcode == AMDGPU::V_DIV_FMAS_F64_e64;  // in isDivFMas()
     85: return Opcode == AMDGPU::S_GETREG_B32;  // in isSGetReg()
     90: case AMDGPU::S_SETREG_B32:  // in isSSetReg()
     91: case AMDGPU::S_SETREG_B32_mode:  // in isSSetReg()
     92: case AMDGPU::S_SETREG_IMM32_B32:  // in isSSetReg()
     93: case AMDGPU::S_SETREG_IMM32_B32_mode:  // in isSSetReg()
    100: return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;  // in isRWLane()
    104: return Opcode == AMDGPU::S_RFE_B64;  // in isRFE()
    109: case AMDGPU::S_MOVRELS_B32:  // in isSMovRel()
    [all …]
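
The hazard recognizer leans on a family of one-line opcode predicates (isDivFMas, isSGetReg, isSSetReg, isRWLane, ...) that classify an instruction before the actual hazard checks run. A minimal compilable sketch of that predicate style, with stand-in enums rather than the real opcode values:

    #include <cstdio>

    // Hypothetical stand-ins for the opcodes named in the listing.
    enum Opcode { S_GETREG_B32, S_SETREG_B32, S_SETREG_B32_mode,
                  S_RFE_B64, V_READLANE_B32, V_WRITELANE_B32, V_NOP };

    // Tiny classification predicates, one per hazard category.
    bool isSGetReg(Opcode Opc) { return Opc == S_GETREG_B32; }

    bool isSSetReg(Opcode Opc) {
      switch (Opc) {
      case S_SETREG_B32:
      case S_SETREG_B32_mode:
        return true;
      default:
        return false;
      }
    }

    bool isRWLane(Opcode Opc) {
      return Opc == V_READLANE_B32 || Opc == V_WRITELANE_B32;
    }

    int main() {
      std::printf("%d %d %d\n", isSGetReg(S_GETREG_B32),
                  isSSetReg(S_SETREG_B32_mode), isRWLane(V_NOP));
    }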

SIInsertWaitcnts.cpp
    167: AMDGPU::S_WAIT_LOADCNT, AMDGPU::S_WAIT_DSCNT, AMDGPU::S_WAIT_EXPCNT,
    168: AMDGPU::S_WAIT_STORECNT, AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
    169: AMDGPU::S_WAIT_KMCNT};
    187: const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());  // in getVmemType()
    188: const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =  // in getVmemType()
    189: AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);  // in getVmemType()
    198: unsigned &getCounterRef(AMDGPU::Waitcnt &Wait, InstCounterType T) {  // in getCounterRef()
    219: void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {  // in addWait()
    224: void setNoWait(AMDGPU::Waitcnt &Wait, InstCounterType T) {  // in setNoWait()
    228: unsigned getWait(AMDGPU::Waitcnt &Wait, InstCounterType T) {  // in getWait()
    [all …]
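
The addWait/setNoWait/getWait accessors manipulate one pending count per counter kind. A natural reading (an assumption here, not taken from the listing) is that merging two wait requirements keeps the smaller count, since a wait until at most N events are outstanding also satisfies any looser bound. A self-contained sketch of a Waitcnt-like structure under that assumption:

    #include <algorithm>
    #include <cstdio>

    // Hypothetical mirror of AMDGPU::Waitcnt: one pending count per
    // counter kind, where ~0u means "no wait required".
    enum InstCounterType { LOAD_CNT, DS_CNT, EXP_CNT, NUM_COUNTERS };
    constexpr unsigned NoWait = ~0u;

    struct Waitcnt {
      unsigned Cnt[NUM_COUNTERS] = {NoWait, NoWait, NoWait};
    };

    // Merging two requirements keeps the stricter (smaller) count,
    // since waiting down to N also satisfies any wait bound above N.
    void addWait(Waitcnt &Wait, InstCounterType T, unsigned Count) {
      Wait.Cnt[T] = std::min(Wait.Cnt[T], Count);
    }

    int main() {
      Waitcnt W;
      addWait(W, LOAD_CNT, 3);
      addWait(W, LOAD_CNT, 1); // stricter requirement wins
      std::printf("loadcnt wait: %u\n", W.Cnt[LOAD_CNT]); // 1
    }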

AMDGPUArgumentUsageInfo.cpp
     96: &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));  // in getPreloadedValue()
    100: &AMDGPU::SGPR_64RegClass,  // in getPreloadedValue()
    104: &AMDGPU::SGPR_32RegClass, LLT::scalar(32));  // in getPreloadedValue()
    107: &AMDGPU::SGPR_32RegClass, LLT::scalar(32));  // in getPreloadedValue()
    110: &AMDGPU::SGPR_32RegClass, LLT::scalar(32));  // in getPreloadedValue()
    113: &AMDGPU::SGPR_32RegClass, LLT::scalar(32));  // in getPreloadedValue()
    117: &AMDGPU::SGPR_32RegClass, LLT::scalar(32));  // in getPreloadedValue()
    120: &AMDGPU::SGPR_32RegClass, LLT::scalar(32)};  // in getPreloadedValue()
    123: &AMDGPU::SGPR_64RegClass,  // in getPreloadedValue()
    127: &AMDGPU::SGPR_64RegClass,  // in getPreloadedValue()
    [all …]

AMDGPURemoveIncompatibleFunctions.cpp
     30: AMDGPUFeatureKV[AMDGPU::NumSubtargetFeatures - 1];
     92: constexpr unsigned FeaturesToCheck[] = {AMDGPU::FeatureGFX11Insts,
     93: AMDGPU::FeatureGFX10Insts,
     94: AMDGPU::FeatureGFX9Insts,
     95: AMDGPU::FeatureGFX8Insts,
     96: AMDGPU::FeatureDPP,
     97: AMDGPU::Feature16BitInsts,
     98: AMDGPU::FeatureDot1Insts,
     99: AMDGPU::FeatureDot2Insts,
    100: AMDGPU::FeatureDot3Insts,
    [all …]
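
AMDGPURemoveIncompatibleFunctions walks a fixed list of feature bits and drops any function that requires a checked feature the selected subtarget lacks. A self-contained sketch of that containment test, using std::bitset in place of LLVM's FeatureBitset and invented feature indices:

    #include <bitset>
    #include <cstdio>

    // Hypothetical feature indices standing in for AMDGPU::Feature*.
    enum Feature { FeatureGFX11Insts, FeatureGFX10Insts, FeatureDPP,
                   NumFeatures };
    using FeatureBits = std::bitset<NumFeatures>;

    constexpr Feature FeaturesToCheck[] = {FeatureGFX11Insts,
                                           FeatureGFX10Insts, FeatureDPP};

    // True if the function needs a checked feature the target lacks.
    bool isIncompatible(const FeatureBits &Needed, const FeatureBits &Have) {
      for (Feature F : FeaturesToCheck)
        if (Needed.test(F) && !Have.test(F))
          return true;
      return false;
    }

    int main() {
      FeatureBits Needed, Have;
      Needed.set(FeatureGFX11Insts);           // function wants GFX11 insts
      Have.set(FeatureGFX10Insts).set(FeatureDPP); // target is GFX10-ish
      std::printf("incompatible: %d\n", isIncompatible(Needed, Have)); // 1
    }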

SIWholeQuadMode.cpp
    440: case AMDGPU::EXEC:  // in markOperand()
    441: case AMDGPU::EXEC_LO:  // in markOperand()
    460: markDefs(MI, LR, Unit, AMDGPU::NoSubRegister, Flag, Worklist);  // in markOperand()
    511: } else if (Opcode == AMDGPU::WQM) {  // in scanInstructions()
    516: } else if (Opcode == AMDGPU::SOFT_WQM) {  // in scanInstructions()
    519: } else if (Opcode == AMDGPU::STRICT_WWM) {  // in scanInstructions()
    526: } else if (Opcode == AMDGPU::STRICT_WQM ||  // in scanInstructions()
    534: if (Opcode == AMDGPU::STRICT_WQM) {  // in scanInstructions()
    548: } else if (Opcode == AMDGPU::LDS_PARAM_LOAD ||  // in scanInstructions()
    549: Opcode == AMDGPU::DS_PARAM_LOAD ||  // in scanInstructions()
    [all …]

SILateBranchLowering.cpp
     76: bool HasColorExports = AMDGPU::getHasColorExport(F);  // in generateEndPgm()
     77: bool HasDepthExports = AMDGPU::getHasDepthExport(F);  // in generateEndPgm()
     81: bool MustExport = !AMDGPU::isGFX10Plus(TII->getSubtarget());  // in generateEndPgm()
     88: ? AMDGPU::Exp::ET_NULL  // in generateEndPgm()
     89: : (HasColorExports ? AMDGPU::Exp::ET_MRT0 : AMDGPU::Exp::ET_MRTZ);  // in generateEndPgm()
     90: BuildMI(MBB, I, DL, TII->get(AMDGPU::EXP_DONE))  // in generateEndPgm()
     92: .addReg(AMDGPU::VGPR0, RegState::Undef)  // in generateEndPgm()
     93: .addReg(AMDGPU::VGPR0, RegState::Undef)  // in generateEndPgm()
     94: .addReg(AMDGPU::VGPR0, RegState::Undef)  // in generateEndPgm()
     95: .addReg(AMDGPU::VGPR0, RegState::Undef)  // in generateEndPgm()
    [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/

AMDGPUMCCodeEmitter.cpp
    149: STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))  // in getLit16Encoding()
    206: STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))  // in getLit32Encoding()
    246: STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))  // in getLit64Encoding()
    274: case AMDGPU::OPERAND_REG_IMM_INT32:  // in getLitEncoding()
    275: case AMDGPU::OPERAND_REG_IMM_FP32:  // in getLitEncoding()
    276: case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:  // in getLitEncoding()
    277: case AMDGPU::OPERAND_REG_INLINE_C_INT32:  // in getLitEncoding()
    278: case AMDGPU::OPERAND_REG_INLINE_C_FP32:  // in getLitEncoding()
    279: case AMDGPU::OPERAND_REG_INLINE_AC_INT32:  // in getLitEncoding()
    280: case AMDGPU::OPERAND_REG_INLINE_AC_FP32:  // in getLitEncoding()
    [all …]

AMDGPUInstPrinter.cpp
     25: using namespace llvm::AMDGPU;
    112: if (AMDGPU::isGFX12(STI) && IsVBuffer)  // in printOffset()
    129: AMDGPU::isGFX12(STI);  // in printFlatOffset()
    132: O << formatDec(SignExtend32(Imm, AMDGPU::getNumFlatOffsetBits(STI)));  // in printFlatOffset()
    185: if (AMDGPU::isGFX12Plus(STI)) {  // in printCPol()
    196: O << ((AMDGPU::isGFX940(STI) &&  // in printCPol()
    200: O << (AMDGPU::isGFX940(STI) ? " nt" : " slc");  // in printCPol()
    201: if ((Imm & CPol::DLC) && AMDGPU::isGFX10Plus(STI))  // in printCPol()
    203: if ((Imm & CPol::SCC) && AMDGPU::isGFX90A(STI))  // in printCPol()
    204: O << (AMDGPU::isGFX940(STI) ? " sc1" : " scc");  // in printCPol()
    [all …]
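
printCPol() prints one mnemonic per cache-policy bit and gates both a bit's validity and its spelling on the subtarget generation (for instance, dlc only on GFX10+, and slc spelled nt on GFX940 per the listing). A simplified sketch of that flags-to-text logic — the bit values and the single generation flag are illustrative, not the real CPol encoding:

    #include <cstdio>
    #include <string>

    // Hypothetical cache-policy bits (loosely mirroring CPol above).
    enum CPol : unsigned { GLC = 1, SLC = 2, DLC = 4 };

    // Each set bit prints a mnemonic; some bits only exist on certain
    // subtarget generations and are suppressed elsewhere.
    std::string printCPol(unsigned Imm, bool IsGFX10Plus) {
      std::string S;
      if (Imm & GLC) S += " glc";
      if (Imm & SLC) S += " slc";
      if ((Imm & DLC) && IsGFX10Plus) S += " dlc";
      return S;
    }

    int main() {
      std::printf("%s\n", printCPol(GLC | DLC, true).c_str());  // " glc dlc"
      std::printf("%s\n", printCPol(GLC | DLC, false).c_str()); // " glc"
    }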

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCA/

AMDGPUCustomBehaviour.cpp
     26: case AMDGPU::S_WAITCNT:  // in postProcessInstruction()
     27: case AMDGPU::S_WAITCNT_soft:  // in postProcessInstruction()
     28: case AMDGPU::S_WAITCNT_EXPCNT:  // in postProcessInstruction()
     29: case AMDGPU::S_WAITCNT_LGKMCNT:  // in postProcessInstruction()
     30: case AMDGPU::S_WAITCNT_VMCNT:  // in postProcessInstruction()
     31: case AMDGPU::S_WAITCNT_VSCNT:  // in postProcessInstruction()
     32: case AMDGPU::S_WAITCNT_VSCNT_soft:  // in postProcessInstruction()
     33: case AMDGPU::S_WAITCNT_EXPCNT_gfx10:  // in postProcessInstruction()
     34: case AMDGPU::S_WAITCNT_LGKMCNT_gfx10:  // in postProcessInstruction()
     35: case AMDGPU::S_WAITCNT_VMCNT_gfx10:  // in postProcessInstruction()
    [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/Disassembler/

AMDGPUDisassembler.cpp
     43: (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
     44: : AMDGPU::EncValues::SGPR_MAX_SI)
     50: if (!STI.hasFeature(AMDGPU::FeatureWavefrontSize64) &&  // in addDefaultWaveSize()
     51: !STI.hasFeature(AMDGPU::FeatureWavefrontSize32)) {  // in addDefaultWaveSize()
     56: STICopy.ToggleFeature(AMDGPU::FeatureWavefrontSize32);  // in addDefaultWaveSize()
     68: CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) {  // in AMDGPUDisassembler()
     70: if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())  // in AMDGPUDisassembler()
     73: for (auto [Symbol, Code] : AMDGPU::UCVersion::getGFXVersions())  // in AMDGPUDisassembler()
     82: CodeObjectVersion = AMDGPU::getAMDHSACodeObjectVersion(Version);  // in setABIVersion()
     95: int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);  // in insertNamedMCOperand()
    [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/

AMDGPUAsmParser.cpp
     46: using namespace llvm::AMDGPU;
    279: return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);  // in isRegOrImmWithInt16InputMods()
    283: return isRegOrImmWithInputMods(AMDGPU::VS_16RegClassID, MVT::i16);  // in isRegOrImmWithIntT16InputMods()
    287: return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);  // in isRegOrImmWithInt32InputMods()
    291: return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::i16);  // in isRegOrInlineImmWithInt16InputMods()
    295: return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::i32);  // in isRegOrInlineImmWithInt32InputMods()
    299: return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);  // in isRegOrImmWithInt64InputMods()
    303: return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);  // in isRegOrImmWithFP16InputMods()
    307: return isRegOrImmWithInputMods(AMDGPU::VS_16RegClassID, MVT::f16);  // in isRegOrImmWithFPT16InputMods()
    311: return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);  // in isRegOrImmWithFP32InputMods()
    [all …]

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/

AMDGPUBaseInfo.cpp
     38: llvm::cl::init(llvm::AMDGPU::AMDHSA_COV5),
    160: namespace AMDGPU {  // namespace
    223: return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;  // in getMultigridSyncArgImplicitArgPosition()
    237: return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;  // in getHostcallImplicitArgPosition()
    248: return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET;  // in getDefaultQueueImplicitArgPosition()
    259: return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET;  // in getCompletionActionImplicitArgPosition()
    529: if (ST.hasFeature(AMDGPU::FeatureGFX12Insts))  // in getVOPDEncodingFamily()
    531: if (ST.hasFeature(AMDGPU::FeatureGFX11Insts))  // in getVOPDEncodingFamily()
    549: return AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0X);  // in isVOPD()
    553: return Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||  // in isMAC()
    [all …]