Cross-reference hits for the AMDGPU cache-policy (CPol) bits in the FreeBSD copy of llvm-project, grouped by source file.

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
SIMemoryLegalizer.cpp
    285   AMDGPU::CPol::CPol Bit) const;
    377   return enableNamedBit(MI, AMDGPU::CPol::GLC);   in enableGLCBit()
    383   return enableNamedBit(MI, AMDGPU::CPol::SLC);   in enableSLCBit()
    485   return enableNamedBit(MI, AMDGPU::CPol::SC0);   in enableSC0Bit()
    491   return enableNamedBit(MI, AMDGPU::CPol::SC1);   in enableSC1Bit()
    497   return enableNamedBit(MI, AMDGPU::CPol::NT);   in enableNTBit()
    549   return enableNamedBit(MI, AMDGPU::CPol::DLC);   in enableDLCBit()
    597   AMDGPU::CPol::CPol Value) const;
    601   AMDGPU::CPol::CPol Value) const;
    956   AMDGPU::CPol::CPol Bit) const {   in enableNamedBit()
    [all …]
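The hits above are the memory legalizer's cache-control helpers: each enableXXXBit() wrapper ORs one named cache-policy bit into the instruction's cpol immediate via enableNamedBit(). A minimal standalone sketch of that pattern follows; the bit values and the FakeMemInst type are illustrative stand-ins, not LLVM's MachineInstr or SIDefines.h encoding.

    // Model of enableNamedBit(): OR one named cache-policy bit into the
    // instruction's cpol immediate, failing if the operand does not exist.
    #include <cstdint>
    #include <cstdio>

    namespace CPol {
    enum CPol : uint32_t { GLC = 1u << 0, SLC = 1u << 1, DLC = 1u << 2 }; // illustrative
    } // namespace CPol

    struct FakeMemInst {
      bool HasCPolOperand = true; // the real code looks this operand up by name
      uint32_t CPolImm = 0;
    };

    static bool enableNamedBit(FakeMemInst &MI, CPol::CPol Bit) {
      if (!MI.HasCPolOperand)
        return false;
      MI.CPolImm |= Bit;
      return true;
    }

    // Thin wrappers in the style of enableGLCBit()/enableSLCBit() above.
    static bool enableGLCBit(FakeMemInst &MI) { return enableNamedBit(MI, CPol::GLC); }
    static bool enableSLCBit(FakeMemInst &MI) { return enableNamedBit(MI, CPol::SLC); }

    int main() {
      FakeMemInst Load;
      enableGLCBit(Load);
      enableSLCBit(Load);
      std::printf("cpol = 0x%x\n", Load.CPolImm);
    }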
MIMGInstructions.td
    426   DMask:$dmask, UNorm:$unorm, CPol:$cpol,
    439   DMask:$dmask, UNorm:$unorm, CPol:$cpol,
    451   Dim:$dim, UNorm:$unorm, CPol:$cpol,
    464   Dim:$dim, UNorm:$unorm, CPol:$cpol,
    476   Dim:$dim, UNorm:$unorm, CPol:$cpol,
    489   Dim:$dim, UNorm:$unorm, CPol:$cpol,
    502   CPol:$cpol, R128A16:$r128, A16:$a16, TFE:$tfe),
    516   CPol:$cpol, R128A16:$r128, A16:$a16, TFE:$tfe,
    533   CPol:$cpol, R128A16:$r128, A16:$a16, TFE:$tfe,
    684   DMask:$dmask, UNorm:$unorm, CPol:$cpol,
    [all …]
SILoadStoreOptimizer.cpp
    117   unsigned CPol = 0;   member
    814   CPol = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm();   in setMI()
   1045   if (CI.CPol != Paired.CPol)   in offsetsCanBeCombined()
   1489   New.addImm(CI.CPol).addMemOperand(combineKnownAdjacentMMOs(CI, Paired));   in mergeSMemLoadImmPair()
   1528   .addImm(CI.CPol) // cpol   in mergeBufferLoadPair()
   1573   .addImm(CI.CPol) // cpol   in mergeTBufferLoadPair()
   1616   .addImm(CI.CPol) // cpol   in mergeTBufferStorePair()
   1644   .addImm(CI.CPol)   in mergeFlatLoadPair()
   1674   .addImm(CI.CPol)   in mergeFlatStorePair()
   1910   .addImm(CI.CPol) // cpol   in mergeBufferStorePair()
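These hits show how the load/store optimizer treats the cache-policy immediate while pairing accesses: it is captured once in setMI(), two candidates are combined only when their values match (line 1045), and every merge helper re-emits the shared value with addImm(). A standalone sketch of that guard, using an illustrative AccessInfo struct in place of the pass's CombineInfo:

    #include <cstdint>
    #include <optional>

    struct AccessInfo {
      uint32_t Offset = 0; // byte offset of the access
      uint32_t Width = 4;  // access size in bytes
      uint32_t CPol = 0;   // cache-policy immediate read from the cpol operand
    };

    // Pair two accesses only if their cache policies agree; the merged access
    // carries the shared policy forward. The adjacency test is deliberately naive.
    static std::optional<AccessInfo> tryMerge(const AccessInfo &CI,
                                              const AccessInfo &Paired) {
      if (CI.CPol != Paired.CPol)
        return std::nullopt;
      if (CI.Offset + CI.Width != Paired.Offset)
        return std::nullopt;
      return AccessInfo{CI.Offset, CI.Width + Paired.Width, CI.CPol};
    }

Keeping mismatched policies apart avoids silently widening or narrowing the caching behaviour of either original access.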
AMDGPULowerBufferFatPointers.cpp
   1084   Aux |= AMDGPU::CPol::GLC;   in handleMemoryInst()
   1086   Aux |= AMDGPU::CPol::SLC;   in handleMemoryInst()
   1088   Aux |= (Aux & AMDGPU::CPol::GLC ? AMDGPU::CPol::DLC : 0);   in handleMemoryInst()
   1090   Aux |= AMDGPU::CPol::VOLATILE;   in handleMemoryInst()
   1224   Aux |= AMDGPU::CPol::SLC;   in visitAtomicCmpXchgInst()
   1226   Aux |= AMDGPU::CPol::VOLATILE;   in visitAtomicCmpXchgInst()
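The recurring shape here is an Aux bitmask accumulated from properties of the memory operation being rewritten and later attached to the lowered buffer intrinsic. A standalone sketch of that accumulation; which property sets which bit is my assumption (the excerpts only show the |= statements), except for the DLC-only-with-GLC coupling taken directly from line 1088.

    #include <cstdint>

    namespace CPol { // illustrative values, including an internal VOLATILE marker
    enum : uint32_t { GLC = 1, SLC = 2, DLC = 4, VOLATILE = 1u << 31 };
    } // namespace CPol

    struct MemAccessProps {
      bool Coherent;    // assumed trigger for GLC
      bool Nontemporal; // assumed trigger for SLC
      bool IsVolatile;
    };

    static uint32_t buildAux(const MemAccessProps &P) {
      uint32_t Aux = 0;
      if (P.Coherent)
        Aux |= CPol::GLC;
      if (P.Nontemporal)
        Aux |= CPol::SLC;
      // Line 1088: DLC is only added when GLC is already set.
      Aux |= (Aux & CPol::GLC) ? CPol::DLC : 0;
      if (P.IsVolatile)
        Aux |= CPol::VOLATILE;
      return Aux;
    }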
SIDefines.h
    377   namespace CPol {
    379   enum CPol {   enum
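This is the declaration site the other files refer to: an unscoped enum named CPol nested in a namespace of the same name, which is why uses read as AMDGPU::CPol::GLC and the type as AMDGPU::CPol::CPol. An illustrative model of that shape; the bit positions below are placeholders, not the real SIDefines.h encoding (which also defines the GFX12 TH/SCOPE fields and the *_pregfx12 masks).

    #include <cstdint>

    namespace AMDGPU {
    namespace CPol {
    enum CPol : uint32_t {
      GLC = 1u << 0, // globally coherent
      SLC = 1u << 1, // system-level coherent / nontemporal hint
      DLC = 1u << 2, // device-level coherent (GFX10+)
      SCC = 1u << 4, // placeholder position
      ALL_pregfx12 = GLC | SLC | DLC | SCC,
    };
    } // namespace CPol
    } // namespace AMDGPU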
SIInstrInfo.td
    875   ? AMDGPU::CPol::ALL
    876   : AMDGPU::CPol::ALL_pregfx12),
    883   ? AMDGPU::CPol::SWZ
    884   : AMDGPU::CPol::SWZ_pregfx12);
    890   ? AMDGPU::CPol::ALL
    891   : AMDGPU::CPol::ALL_pregfx12);
    892   return CurDAG->getTargetConstant(cpol | AMDGPU::CPol::GLC, SDLoc(N), MVT::i8);
   1086   def CPol : CustomOperand<i32, 1>;
   1087   def CPol_0 : DefaultOperand<CPol, 0>;
   1088   def CPol_GLC1 : DefaultOperand<CPol, 1>;
    [all …]
AMDGPUInstructionSelector.cpp
   1896   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();   in selectImageIntrinsic()   local
   1898   CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization   in selectImageIntrinsic()
   1899   if (CPol & ~((IsGFX12Plus ? AMDGPU::CPol::ALL : AMDGPU::CPol::ALL_pregfx12) |   in selectImageIntrinsic()
   1900   AMDGPU::CPol::VOLATILE))   in selectImageIntrinsic()
   2014   MIB.addImm(CPol);   in selectImageIntrinsic()
   3253   MIB.addImm(Aux & AMDGPU::CPol::ALL); // cpol   in selectBufferLoadLds()
   3254   MIB.addImm(Aux & AMDGPU::CPol::SWZ_pregfx12 ? 1 : 0); // swz   in selectBufferLoadLds()
   5671   (AMDGPU::isGFX12Plus(STI) ? AMDGPU::CPol::ALL   in renderExtractCPol()
   5672   : AMDGPU::CPol::ALL_pregfx12));   in renderExtractCPol()
   5680   (AMDGPU::isGFX12Plus(STI) ? AMDGPU::CPol::SWZ   in renderExtractSWZ()
    [all …]
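For image intrinsics the selector reads the caller-supplied policy, currently forces GLC on atomics so the old value comes back (the TODO notes this could be skipped when the result is unused), and rejects any bit outside what the generation allows plus the internal VOLATILE marker; the same check reappears in SIISelLowering.cpp further down. A standalone sketch of that check, with illustrative masks:

    #include <cstdint>
    #include <optional>

    namespace CPol { // illustrative masks
    enum : uint32_t { GLC = 1, ALL_pregfx12 = 0x7, ALL = 0x3f, VOLATILE = 1u << 31 };
    } // namespace CPol

    // Returns the policy to encode, or nullopt if it uses bits the target
    // generation cannot represent.
    static std::optional<uint32_t> checkImageCPol(uint32_t CPolBits,
                                                  bool IsGFX12Plus,
                                                  bool IsAtomic) {
      if (IsAtomic)
        CPolBits |= CPol::GLC; // always request the returned value for now
      uint32_t Allowed =
          (IsGFX12Plus ? CPol::ALL : CPol::ALL_pregfx12) | CPol::VOLATILE;
      if (CPolBits & ~Allowed)
        return std::nullopt;
      return CPolBits;
    }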
SMInstructions.td
    124   !con((ins baseClass:$sbase), offsets.Ins, (ins CPol:$cpol)),
    139   offsets.Ins, (ins CPol:$cpol)),
    502   let InOperandList = (ins immPs.BaseClass:$sbase, smrd_offset_8:$offset, CPol:$cpol);
    784   let InOperandList = (ins ps.BaseClass:$sbase, smrd_literal_offset:$offset, CPol:$cpol);
   1449   let InOperandList = !con((ins BaseClass:$sbase), offsets.Ins, (ins CPol:$cpol));
FLATInstructions.td
    214   !if(HasTiedOutput, (ins CPol:$cpol, vdata_op:$vdst_in),
    321   (ins flat_offset:$offset, CPol:$cpol)),
    340   FLAT_Pseudo<opName, (outs), (ins CPol:$cpol), "$cpol", [(node)]> {
    388   !if(HasTiedOutput, (ins CPol:$cpol, getLdStRegisterOperand<regClass>.ret:$vdst_in),
    465   (ins VGPR_32:$vaddr, SReg_32_XEXEC_HI:$saddr, flat_offset:$offset, CPol:$cpol),
    467   (ins SReg_32_XEXEC_HI:$saddr, flat_offset:$offset, CPol:$cpol),
    469   (ins VGPR_32:$vaddr, flat_offset:$offset, CPol:$cpol),
    470   (ins flat_offset:$offset, CPol:$cpol)))),
SIISelLowering.cpp
   1213   if (Aux->getZExtValue() & AMDGPU::CPol::VOLATILE)   in getTgtMemIntrinsic()
   8166   unsigned CPol = Op.getConstantOperandVal(ArgOffset + Intr->CachePolicyIndex);   in lowerImage()   local
   8168   CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization   in lowerImage()
   8169   if (CPol & ~((IsGFX12Plus ? AMDGPU::CPol::ALL : AMDGPU::CPol::ALL_pregfx12) |   in lowerImage()
   8170   AMDGPU::CPol::VOLATILE))   in lowerImage()
   8192   Ops.push_back(DAG.getTargetConstant(CPol, DL, MVT::i32));   in lowerImage()
   8559   unsigned CPol = Op.getConstantOperandVal(3);   in LowerINTRINSIC_WO_CHAIN()   local
   8562   if (CPol & ~((Subtarget->getGeneration() >= AMDGPUSubtarget::GFX12)   in LowerINTRINSIC_WO_CHAIN()
   8563   ? AMDGPU::CPol::ALL   in LowerINTRINSIC_WO_CHAIN()
   8564   : AMDGPU::CPol::ALL_pregfx12))   in LowerINTRINSIC_WO_CHAIN()
    [all …]
BUFInstructions.td
    671   (ins SReg_128:$srsrc, SCSrc_b32:$soffset, Offset:$offset, CPol:$cpol, i1imm:$swz),
    695   dag CPol = !if(vdata_in, (ins CPol_GLC_WithDefault:$cpol),
    698   dag ret = !con(Data, MainInputs, CPol);
SIRegisterInfo.cpp
   1672   MIB.addImm(LastUse ? AMDGPU::CPol::TH_LU : 0); // cpol   in buildSpillLoadStore()
SIInstrInfo.cpp
   6879   if (const MachineOperand *CPol =   in legalizeOperands()   local
   6881   MIB.addImm(CPol->getImm());   in legalizeOperands()
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/MCTargetDesc/
AMDGPUInstPrinter.cpp
    186   const int64_t TH = Imm & CPol::TH;   in printCPol()
    187   const int64_t Scope = Imm & CPol::SCOPE;   in printCPol()
    195   if (Imm & CPol::GLC)   in printCPol()
    199   if (Imm & CPol::SLC)   in printCPol()
    201   if ((Imm & CPol::DLC) && AMDGPU::isGFX10Plus(STI))   in printCPol()
    203   if ((Imm & CPol::SCC) && AMDGPU::isGFX90A(STI))   in printCPol()
    205   if (Imm & ~CPol::ALL)   in printCPol()
    225   if (TH & AMDGPU::CPol::TH_ATOMIC_CASCADE) {   in printTH()
    226   if (Scope >= AMDGPU::CPol::SCOPE_DEV)   in printTH()
    227   O << "CASCADE" << (TH & AMDGPU::CPol::TH_ATOMIC_NT ? "_NT" : "_RT");   in printTH()
    [all …]
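On GFX12 the printer decomposes the immediate into TH and SCOPE fields (printTH handles spellings such as the atomic CASCADE variants); on older targets it prints one mnemonic per set bit, gating dlc and scc on the generations that have them and flagging any leftover bits. A standalone sketch of that older path, with illustrative masks:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    namespace CPol { // illustrative masks
    enum : uint32_t { GLC = 1, SLC = 2, DLC = 4, SCC = 16,
                      ALL = GLC | SLC | DLC | SCC };
    } // namespace CPol

    struct STIFlags { bool IsGFX10Plus; bool IsGFX90A; };

    static std::string printCPol(uint32_t Imm, const STIFlags &STI) {
      std::string O;
      if (Imm & CPol::GLC) O += " glc";
      if (Imm & CPol::SLC) O += " slc";
      if ((Imm & CPol::DLC) && STI.IsGFX10Plus) O += " dlc";
      if ((Imm & CPol::SCC) && STI.IsGFX90A)    O += " scc";
      if (Imm & ~CPol::ALL) O += " /* unexpected cache policy bits */";
      return O;
    }

    int main() {
      std::printf("%s\n", printCPol(CPol::GLC | CPol::DLC, {true, false}).c_str());
    }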
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/
AMDGPUAsmParser.cpp
   1768   const unsigned CPol);
   4911   unsigned CPol = Inst.getOperand(CPolPos).getImm();   in validateCoherencyBits()   local
   4914   return validateTHAndScopeBits(Inst, Operands, CPol);   in validateCoherencyBits()
   4918   if (CPol && (isSI() || isCI())) {   in validateCoherencyBits()
   4923   if (CPol & ~(AMDGPU::CPol::GLC | AMDGPU::CPol::DLC)) {   in validateCoherencyBits()
   4929   if (isGFX90A() && !isGFX940() && (CPol & CPol::SCC)) {   in validateCoherencyBits()
   4947   if (!(TSFlags & SIInstrFlags::MIMG) && !(CPol & CPol::GLC)) {   in validateCoherencyBits()
   4953   if (CPol & CPol::GLC) {   in validateCoherencyBits()
   4969   const unsigned CPol) {   in validateTHAndScopeBits()   argument
   4970   const unsigned TH = CPol & AMDGPU::CPol::TH;   in validateTHAndScopeBits()
    [all …]
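The assembler rejects cache-policy modifiers the selected GPU cannot encode: GFX12 operands are routed to a separate TH/scope validator, SI/CI accept no such modifiers at all, and this particular check then admits only GLC and DLC, with further generation-specific rules (e.g. SCC on GFX90A) visible in the later hits. A standalone sketch of the first few checks, with illustrative diagnostics:

    #include <cstdint>
    #include <optional>
    #include <string>

    namespace CPol { // illustrative masks
    enum : uint32_t { GLC = 1, SLC = 2, DLC = 4 };
    } // namespace CPol

    struct TargetCaps { bool IsSIorCI; bool IsGFX12Plus; };

    // Returns a diagnostic for invalid cache-policy bits, or nullopt if OK.
    static std::optional<std::string>
    validateCoherencyBits(uint32_t CPolBits, const TargetCaps &T) {
      if (T.IsGFX12Plus)
        return std::nullopt; // handled by the separate TH/scope validator
      if (CPolBits && T.IsSIorCI)
        return "cache policy modifiers are not supported on this GPU";
      if (CPolBits & ~(CPol::GLC | CPol::DLC))
        return "invalid cache policy for this instruction";
      return std::nullopt;
    }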
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/Disassembler/
AMDGPUDisassembler.cpp
    654   unsigned CPol =   in getInstruction()   local
    656   AMDGPU::CPol::GLC : 0;   in getInstruction()
    658   insertNamedMCOperand(MI, MCOperand::createImm(CPol),   in getInstruction()
    660   } else if (CPol) {   in getInstruction()
    661   MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);   in getInstruction()
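The decode-time counterpart of the earlier helpers: when an encoding implies a cache-policy bit (GLC in these hits), the disassembler either materializes a cpol operand carrying it or ORs it into the operand that was already decoded. A standalone sketch with simplified stand-ins for the MCInst/MCOperand machinery:

    #include <cstdint>
    #include <vector>

    namespace CPol { enum : uint32_t { GLC = 1 }; } // illustrative

    struct FakeMCInst {
      std::vector<uint32_t> Operands;
      int CPolPos = -1; // index of the cpol operand, -1 if it was not decoded
    };

    static void applyImpliedCPol(FakeMCInst &MI, bool EncodingImpliesGLC) {
      uint32_t CPolBits = EncodingImpliesGLC ? CPol::GLC : 0;
      if (MI.CPolPos < 0) {
        MI.Operands.push_back(CPolBits); // insert a fresh cpol operand
        MI.CPolPos = int(MI.Operands.size()) - 1;
      } else if (CPolBits) {
        MI.Operands[MI.CPolPos] |= CPolBits; // merge into the decoded value
      }
    }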