Searched full:amdgpu (Results 1 – 25 of 300) sorted by relevance


/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
SIRegisterInfo.cpp
14 #include "AMDGPU.h"
33 "amdgpu-spill-sgpr-to-vgpr",
90 Register TmpVGPR = AMDGPU::NoRegister;
96 Register SavedExecReg = AMDGPU::NoRegister;
130 ExecReg = AMDGPU::EXEC_LO; in SGPRSpillBuilder()
131 MovOpc = AMDGPU::S_MOV_B32; in SGPRSpillBuilder()
132 NotOpc = AMDGPU::S_NOT_B32; in SGPRSpillBuilder()
134 ExecReg = AMDGPU::EXEC; in SGPRSpillBuilder()
135 MovOpc = AMDGPU::S_MOV_B64; in SGPRSpillBuilder()
136 NotOpc = AMDGPU::S_NOT_B64; in SGPRSpillBuilder()
[all …]
SIInstrInfo.cpp
15 #include "AMDGPU.h"
43 namespace llvm::AMDGPU { namespace
48 } // namespace llvm::AMDGPU
54 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
58 "amdgpu-fix-16-bit-physreg-copies",
64 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN), in SIInstrInfo()
86 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); in nodesHaveSameOperandValue()
87 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); in nodesHaveSameOperandValue()
159 case AMDGPU::S_AND_SAVEEXEC_B32: in resultDependsOnExec()
160 case AMDGPU::S_AND_SAVEEXEC_B64: in resultDependsOnExec()
[all …]
SILoadStoreOptimizer.cpp
60 #include "AMDGPU.h"
167 AddrOp->getReg() != AMDGPU::SGPR_NULL) in hasMergeableAddress()
331 return AMDGPU::getMUBUFElements(Opc); in getOpcodeWidth()
335 TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm(); in getOpcodeWidth()
339 return AMDGPU::getMTBUFElements(Opc); in getOpcodeWidth()
343 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM: in getOpcodeWidth()
344 case AMDGPU::S_BUFFER_LOAD_DWORD_SGPR_IMM: in getOpcodeWidth()
345 case AMDGPU::S_LOAD_DWORD_IMM: in getOpcodeWidth()
346 case AMDGPU::GLOBAL_LOAD_DWORD: in getOpcodeWidth()
347 case AMDGPU::GLOBAL_LOAD_DWORD_SADDR: in getOpcodeWidth()
[all …]
AMDGPUCombinerHelper.cpp
23 case AMDGPU::G_FADD: in fnegFoldsIntoMI()
24 case AMDGPU::G_FSUB: in fnegFoldsIntoMI()
25 case AMDGPU::G_FMUL: in fnegFoldsIntoMI()
26 case AMDGPU::G_FMA: in fnegFoldsIntoMI()
27 case AMDGPU::G_FMAD: in fnegFoldsIntoMI()
28 case AMDGPU::G_FMINNUM: in fnegFoldsIntoMI()
29 case AMDGPU::G_FMAXNUM: in fnegFoldsIntoMI()
30 case AMDGPU::G_FMINNUM_IEEE: in fnegFoldsIntoMI()
31 case AMDGPU::G_FMAXNUM_IEEE: in fnegFoldsIntoMI()
32 case AMDGPU::G_FMINIMUM: in fnegFoldsIntoMI()
[all …]
AMDGPURegisterBankInfo.cpp
10 /// AMDGPU.
14 /// AMDGPU has unique register bank constraints that require special high level
73 #include "AMDGPU.h"
124 if (Opc == AMDGPU::G_ANYEXT || Opc == AMDGPU::G_ZEXT || in applyBank()
125 Opc == AMDGPU::G_SEXT) { in applyBank()
132 if (SrcBank == &AMDGPU::VCCRegBank) { in applyBank()
136 assert(NewBank == &AMDGPU::VGPRRegBank); in applyBank()
142 auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1); in applyBank()
156 if (Opc == AMDGPU::G_TRUNC) { in applyBank()
159 assert(DstBank != &AMDGPU::VCCRegBank); in applyBank()
[all …]
AMDGPUResourceUsageAnalysis.cpp
27 #include "AMDGPU.h"
39 using namespace llvm::AMDGPU;
41 #define DEBUG_TYPE "amdgpu-resource-usage"
50 "amdgpu-assume-external-call-stack-size",
55 "amdgpu-assume-dynamic-stack-object-size",
90 return AMDGPU::getTotalNumVGPRs(ST.hasGFX90AInsts(), ArgNumAGPR, ArgNumVGPR); in getTotalNumVGPRs()
116 if (AMDGPU::getAMDHSACodeObjectVersion(M) >= AMDGPU::AMDHSA_COV5 || in runOnModule()
182 Info.UsesFlatScratch = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) || in analyzeResourceUsage()
183 MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI) || in analyzeResourceUsage()
194 (!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) && in analyzeResourceUsage()
[all …]
AMDGPUSubtarget.cpp
1 //===-- AMDGPUSubtarget.cpp - AMDGPU Subtarget Information ----------------===//
10 /// Implements the AMDGPU specific subclass of TargetSubtarget.
37 #define DEBUG_TYPE "amdgpu-subtarget"
46 "amdgpu-enable-power-sched",
51 "amdgpu-vgpr-index-mode",
55 static cl::opt<bool> UseAA("amdgpu-use-aa-in-codegen",
59 static cl::opt<unsigned> NSAThreshold("amdgpu-nsa-threshold",
108 if (!hasFeature(AMDGPU::FeatureWavefrontSize32) && in initializeSubtargetDependencies()
109 !hasFeature(AMDGPU::FeatureWavefrontSize64)) { in initializeSubtargetDependencies()
113 ToggleFeature(AMDGPU::FeatureWavefrontSize32); in initializeSubtargetDependencies()
[all …]
SIOptimizeExecMasking.cpp
9 #include "AMDGPU.h"
98 case AMDGPU::COPY: in isCopyFromExec()
99 case AMDGPU::S_MOV_B64: in isCopyFromExec()
100 case AMDGPU::S_MOV_B64_term: in isCopyFromExec()
101 case AMDGPU::S_MOV_B32: in isCopyFromExec()
102 case AMDGPU::S_MOV_B32_term: { in isCopyFromExec()
109 return AMDGPU::NoRegister; in isCopyFromExec()
115 case AMDGPU::COPY: in isCopyToExec()
116 case AMDGPU::S_MOV_B64: in isCopyToExec()
117 case AMDGPU::S_MOV_B32: { in isCopyToExec()
[all …]
AMDGPUInstructionSelector.cpp
10 /// AMDGPU.
15 #include "AMDGPU.h"
32 #define DEBUG_TYPE "amdgpu-isel"
72 return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS in getWaveAddress()
91 return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC && in isVCC()
96 return RB->getID() == AMDGPU::VCCRegBankID; in isVCC()
103 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); in constrainCopyLikeIntrin()
134 if (SrcReg == AMDGPU::SCC) { in selectCOPY()
154 STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; in selectCOPY()
166 IsSGPR ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32; in selectCOPY()
[all …]
GCNDPPCombine.cpp
40 #include "AMDGPU.h"
128 if (AMDGPU::isTrue16Inst(Op)) in isShrinkable()
130 if (const auto *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) { in isShrinkable()
140 if (!hasNoImmOrEqual(MI, AMDGPU::OpName::src0_modifiers, 0, Mask) || in isShrinkable()
141 !hasNoImmOrEqual(MI, AMDGPU::OpName::src1_modifiers, 0, Mask) || in isShrinkable()
142 !hasNoImmOrEqual(MI, AMDGPU::OpName::clamp, 0) || in isShrinkable()
143 !hasNoImmOrEqual(MI, AMDGPU::OpName::omod, 0) || in isShrinkable()
144 !hasNoImmOrEqual(MI, AMDGPU::OpName::byte_sel, 0)) { in isShrinkable()
152 int DPP32 = AMDGPU::getDPPOp32(Op); in getDPPOp()
155 int E32 = AMDGPU::getVOPe32(Op); in getDPPOp()
[all …]
SIPeepholeSDWA.cpp
22 #include "AMDGPU.h"
116 using namespace AMDGPU::SDWA;
316 if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) { in getSrcMods()
317 if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) { in getSrcMods()
320 } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) { in getSrcMods()
321 if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) { in getSrcMods()
375 case AMDGPU::V_CVT_F32_FP8_sdwa: in convertToSDWA()
376 case AMDGPU::V_CVT_F32_BF8_sdwa: in convertToSDWA()
377 case AMDGPU::V_CVT_PK_F32_FP8_sdwa: in convertToSDWA()
378 case AMDGPU::V_CVT_PK_F32_BF8_sdwa: in convertToSDWA()
[all …]
SIFoldOperands.cpp
11 #include "AMDGPU.h"
160 case AMDGPU::V_MAC_F32_e64: in macToMad()
161 return AMDGPU::V_MAD_F32_e64; in macToMad()
162 case AMDGPU::V_MAC_F16_e64: in macToMad()
163 return AMDGPU::V_MAD_F16_e64; in macToMad()
164 case AMDGPU::V_FMAC_F32_e64: in macToMad()
165 return AMDGPU::V_FMA_F32_e64; in macToMad()
166 case AMDGPU::V_FMAC_F16_e64: in macToMad()
167 return AMDGPU::V_FMA_F16_gfx9_e64; in macToMad()
168 case AMDGPU::V_FMAC_F16_t16_e64: in macToMad()
[all …]
AMDGPUPassRegistry.def
1 //===- AMDGPUPassRegistry.def - Registry of AMDGPU passes -------*- C++ -*-===//
10 // AMDGPU backend.
19 MODULE_PASS("amdgpu-always-inline", AMDGPUAlwaysInlinePass())
20 MODULE_PASS("amdgpu-attributor", AMDGPUAttributorPass(*this))
21 MODULE_PASS("amdgpu-lower-buffer-fat-pointers",
23 MODULE_PASS("amdgpu-lower-ctor-dtor", AMDGPUCtorDtorLoweringPass())
24 MODULE_PASS("amdgpu-lower-module-lds", AMDGPULowerModuleLDSPass(*this))
25 MODULE_PASS("amdgpu-printf-runtime-binding", AMDGPUPrintfRuntimeBindingPass())
26 MODULE_PASS("amdgpu-unify-metadata", AMDGPUUnifyMetadataPass())
32 FUNCTION_PASS("amdgpu-codegenprepare", AMDGPUCodeGenPreparePass(*this))
[all …]
SIShrinkInstructions.cpp
11 #include "AMDGPU.h"
93 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0); in foldImmediates()
152 if (AMDGPU::VGPR_32RegClass.contains(Reg) && in shouldShrinkTrue16()
153 !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg)) in shouldShrinkTrue16()
207 return AMDGPU::V_NOT_B32_e32; in canModifyToInlineImmOp32()
212 return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32; in canModifyToInlineImmOp32()
251 int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode()); in shrinkScalarCompare()
257 if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) { in shrinkScalarCompare()
261 SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ? in shrinkScalarCompare()
262 AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32; in shrinkScalarCompare()
[all …]
GCNHazardRecognizer.cpp
43 MFMAPaddingRatio("amdgpu-mfma-padding-ratio", cl::init(0), cl::Hidden,
63 MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 19 : 5; in GCNHazardRecognizer()
81 return Opcode == AMDGPU::V_DIV_FMAS_F32_e64 || Opcode == AMDGPU::V_DIV_FMAS_F64_e64; in isDivFMas()
85 return Opcode == AMDGPU::S_GETREG_B32; in isSGetReg()
90 case AMDGPU::S_SETREG_B32: in isSSetReg()
91 case AMDGPU::S_SETREG_B32_mode: in isSSetReg()
92 case AMDGPU::S_SETREG_IMM32_B32: in isSSetReg()
93 case AMDGPU::S_SETREG_IMM32_B32_mode: in isSSetReg()
100 return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32; in isRWLane()
104 return Opcode == AMDGPU::S_RFE_B64; in isRFE()
[all …]
AMDGPUArgumentUsageInfo.cpp
10 #include "AMDGPU.h"
21 #define DEBUG_TYPE "amdgpu-argument-reg-usage-info"
96 &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32)); in getPreloadedValue()
100 &AMDGPU::SGPR_64RegClass, in getPreloadedValue()
104 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
107 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
110 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
113 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
117 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)); in getPreloadedValue()
120 &AMDGPU::SGPR_32RegClass, LLT::scalar(32)}; in getPreloadedValue()
[all …]
R600RegisterInfo.td
3 let Namespace = "AMDGPU";
16 let Namespace = "AMDGPU";
22 let Namespace = "AMDGPU";
31 let Namespace = "AMDGPU";
152 def R600_ArrayBase : RegisterClass <"AMDGPU", [f32, i32], 32,
165 def R600_Addr : RegisterClass <"AMDGPU", [i32], 32, (add (sequence "Addr%u_X", 0, 127))>;
170 def R600_Addr_Y : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_Y)>;
171 def R600_Addr_Z : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_Z)>;
172 def R600_Addr_W : RegisterClass <"AMDGPU", [i32], 32, (add Addr0_W)>;
174 def R600_LDS_SRC_REG : RegisterClass<"AMDGPU", [i32], 32,
[all …]
AMDGPUAttributes.def
17 AMDGPU_ATTRIBUTE(DISPATCH_PTR, "amdgpu-no-dispatch-ptr")
18 AMDGPU_ATTRIBUTE(QUEUE_PTR, "amdgpu-no-queue-ptr")
19 AMDGPU_ATTRIBUTE(DISPATCH_ID, "amdgpu-no-dispatch-id")
20 AMDGPU_ATTRIBUTE(IMPLICIT_ARG_PTR, "amdgpu-no-implicitarg-ptr")
21 AMDGPU_ATTRIBUTE(MULTIGRID_SYNC_ARG, "amdgpu-no-multigrid-sync-arg")
22 AMDGPU_ATTRIBUTE(HOSTCALL_PTR, "amdgpu-no-hostcall-ptr")
23 AMDGPU_ATTRIBUTE(HEAP_PTR, "amdgpu-no-heap-ptr")
24 AMDGPU_ATTRIBUTE(WORKGROUP_ID_X, "amdgpu-no-workgroup-id-x")
25 AMDGPU_ATTRIBUTE(WORKGROUP_ID_Y, "amdgpu-no-workgroup-id-y")
26 AMDGPU_ATTRIBUTE(WORKGROUP_ID_Z, "amdgpu-no-workgroup-id-z")
[all …]
AMDGPURemoveIncompatibleFunctions.cpp
15 #include "AMDGPU.h"
24 #define DEBUG_TYPE "amdgpu-remove-incompatible-functions"
30 AMDGPUFeatureKV[AMDGPU::NumSubtargetFeatures - 1];
47 return "AMDGPU Remove Incompatible Functions"; in getPassName()
92 constexpr unsigned FeaturesToCheck[] = {AMDGPU::FeatureGFX11Insts,
93 AMDGPU::FeatureGFX10Insts,
94 AMDGPU::FeatureGFX9Insts,
95 AMDGPU::FeatureGFX8Insts,
96 AMDGPU::FeatureDPP,
97 AMDGPU::Feature16BitInsts,
[all …]
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/
AMDGPUMCCodeEmitter.cpp
1 //===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
10 /// The AMDGPU code emitter produces machine code that can be executed
149 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm)) in getLit16Encoding()
206 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm)) in getLit32Encoding()
246 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm)) in getLit64Encoding()
274 case AMDGPU::OPERAND_REG_IMM_INT32: in getLitEncoding()
275 case AMDGPU::OPERAND_REG_IMM_FP32: in getLitEncoding()
276 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: in getLitEncoding()
277 case AMDGPU::OPERAND_REG_INLINE_C_INT32: in getLitEncoding()
278 case AMDGPU::OPERAND_REG_INLINE_C_FP32: in getLitEncoding()
[all …]
AMDGPUInstPrinter.cpp
1 //===-- AMDGPUInstPrinter.cpp - AMDGPU MC Inst -> ASM ---------------------===//
25 using namespace llvm::AMDGPU;
112 if (AMDGPU::isGFX12(STI) && IsVBuffer) in printOffset()
129 AMDGPU::isGFX12(STI); in printFlatOffset()
132 O << formatDec(SignExtend32(Imm, AMDGPU::getNumFlatOffsetBits(STI))); in printFlatOffset()
185 if (AMDGPU::isGFX12Plus(STI)) { in printCPol()
196 O << ((AMDGPU::isGFX940(STI) && in printCPol()
200 O << (AMDGPU::isGFX940(STI) ? " nt" : " slc"); in printCPol()
201 if ((Imm & CPol::DLC) && AMDGPU::isGFX10Plus(STI)) in printCPol()
203 if ((Imm & CPol::SCC) && AMDGPU::isGFX90A(STI)) in printCPol()
[all …]
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/
AMDGPUAsmParser.cpp
46 using namespace llvm::AMDGPU;
279 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16); in isRegOrImmWithInt16InputMods()
283 return isRegOrImmWithInputMods(AMDGPU::VS_16RegClassID, MVT::i16); in isRegOrImmWithIntT16InputMods()
287 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32); in isRegOrImmWithInt32InputMods()
291 return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::i16); in isRegOrInlineImmWithInt16InputMods()
295 return isRegOrInline(AMDGPU::VS_32RegClassID, MVT::i32); in isRegOrInlineImmWithInt32InputMods()
299 return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64); in isRegOrImmWithInt64InputMods()
303 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16); in isRegOrImmWithFP16InputMods()
307 return isRegOrImmWithInputMods(AMDGPU::VS_16RegClassID, MVT::f16); in isRegOrImmWithFPT16InputMods()
311 return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32); in isRegOrImmWithFP32InputMods()
[all …]
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/Disassembler/
AMDGPUDisassembler.cpp
1 //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
13 /// This file contains definition for AMDGPU ISA disassembler
40 #define DEBUG_TYPE "amdgpu-disassembler"
43 (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
44 : AMDGPU::EncValues::SGPR_MAX_SI)
50 if (!STI.hasFeature(AMDGPU::FeatureWavefrontSize64) && in addDefaultWaveSize()
51 !STI.hasFeature(AMDGPU::FeatureWavefrontSize32)) { in addDefaultWaveSize()
56 STICopy.ToggleFeature(AMDGPU::FeatureWavefrontSize32); in addDefaultWaveSize()
68 CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) { in AMDGPUDisassembler()
70 if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus()) in AMDGPUDisassembler()
[all …]
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCA/
AMDGPUCustomBehaviour.cpp
26 case AMDGPU::S_WAITCNT: in postProcessInstruction()
27 case AMDGPU::S_WAITCNT_soft: in postProcessInstruction()
28 case AMDGPU::S_WAITCNT_EXPCNT: in postProcessInstruction()
29 case AMDGPU::S_WAITCNT_LGKMCNT: in postProcessInstruction()
30 case AMDGPU::S_WAITCNT_VMCNT: in postProcessInstruction()
31 case AMDGPU::S_WAITCNT_VSCNT: in postProcessInstruction()
32 case AMDGPU::S_WAITCNT_VSCNT_soft: in postProcessInstruction()
33 case AMDGPU::S_WAITCNT_EXPCNT_gfx10: in postProcessInstruction()
34 case AMDGPU::S_WAITCNT_LGKMCNT_gfx10: in postProcessInstruction()
35 case AMDGPU::S_WAITCNT_VMCNT_gfx10: in postProcessInstruction()
[all …]
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/Utils/
AMDGPUBaseInfo.cpp
1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
10 #include "AMDGPU.h"
38 llvm::cl::init(llvm::AMDGPU::AMDHSA_COV5),
160 namespace AMDGPU { namespace
223 return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET; in getMultigridSyncArgImplicitArgPosition()
237 return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET; in getHostcallImplicitArgPosition()
248 return AMDGPU::ImplicitArg::DEFAULT_QUEUE_OFFSET; in getDefaultQueueImplicitArgPosition()
259 return AMDGPU::ImplicitArg::COMPLETION_ACTION_OFFSET; in getCompletionActionImplicitArgPosition()
529 if (ST.hasFeature(AMDGPU::FeatureGFX12Insts)) in getVOPDEncodingFamily()
531 if (ST.hasFeature(AMDGPU::FeatureGFX11Insts)) in getVOPDEncodingFamily()
[all …]
