Lines Matching refs:RBI
44 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI, in AMDGPUInstructionSelector() argument
46 : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM), in AMDGPUInstructionSelector()
119 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) && in constrainCopyLikeIntrin()
120 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI); in constrainCopyLikeIntrin()
139 return RBI.constrainGenericRegister(DstReg, *RC, *MRI); in selectCOPY()
144 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI)) in selectCOPY()
186 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) in selectCOPY()
200 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI); in selectCOPY()
239 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI); in selectPHI()
292 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); in selectG_AND_OR_XOR()
294 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_AND_OR_XOR()
308 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); in selectG_AND_OR_XOR()
321 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_ADD_SUB()
334 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); in selectG_ADD_SUB()
342 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); in selectG_ADD_SUB()
355 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); in selectG_ADD_SUB()
396 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI)) in selectG_ADD_SUB()
407 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) in selectG_ADD_SUB()
433 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); in selectG_UADDO_USUBO_UADDE_USUBE()
460 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) || in selectG_UADDO_USUBO_UADDE_USUBE()
461 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) || in selectG_UADDO_USUBO_UADDE_USUBE()
462 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI)) in selectG_UADDO_USUBO_UADDE_USUBE()
466 !RBI.constrainGenericRegister(I.getOperand(4).getReg(), in selectG_UADDO_USUBO_UADDE_USUBE()
489 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); in selectG_AMDGPU_MAD_64_32()
514 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) in selectG_EXTRACT()
517 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); in selectG_EXTRACT()
528 SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I, in selectG_EXTRACT()
549 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_MERGE_VALUES()
566 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI)) in selectG_MERGE_VALUES()
570 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) in selectG_MERGE_VALUES()
591 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); in selectG_UNMERGE_VALUES()
595 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) in selectG_UNMERGE_VALUES()
609 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) in selectG_UNMERGE_VALUES()
614 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI)) in selectG_UNMERGE_VALUES()
644 const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI); in selectG_BUILD_VECTOR()
673 return RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI); in selectG_BUILD_VECTOR()
679 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); in selectG_BUILD_VECTOR()
695 return RBI.constrainGenericRegister(Dst, RC, *MRI) && in selectG_BUILD_VECTOR()
696 RBI.constrainGenericRegister(Src0, RC, *MRI); in selectG_BUILD_VECTOR()
705 if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI)) in selectG_BUILD_VECTOR()
712 if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI)) in selectG_BUILD_VECTOR()
759 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectG_BUILD_VECTOR()
768 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI); in selectG_BUILD_VECTOR()
778 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) { in selectG_IMPLICIT_DEF()
811 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_INSERT()
817 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI); in selectG_INSERT()
818 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI); in selectG_INSERT()
830 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || in selectG_INSERT()
831 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) || in selectG_INSERT()
832 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI)) in selectG_INSERT()
851 assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID && in selectG_SBFX_UBFX()
866 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectG_SBFX_UBFX()
876 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) || in selectInterpP1F16()
877 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) || in selectInterpP1F16()
878 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI)) in selectInterpP1F16()
957 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI); in selectWritelane()
968 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectWritelane()
1009 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectDivScale()
1304 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); in selectG_ICMP_or_FCMP()
1319 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) && in selectG_ICMP_or_FCMP()
1320 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI); in selectG_ICMP_or_FCMP()
1336 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), in selectG_ICMP_or_FCMP()
1338 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); in selectG_ICMP_or_FCMP()
1355 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); in selectIntrinsicCmp()
1365 return RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI); in selectIntrinsicCmp()
1393 RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI); in selectIntrinsicCmp()
1394 if (!constrainSelectedInstRegOperands(*SelectedMI, TII, TRI, RBI)) in selectIntrinsicCmp()
1452 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); in selectRelocConstant()
1454 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) in selectRelocConstant()
1478 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectGroupStaticSize()
1498 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectGroupStaticSize()
1513 !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) in selectReturnAddress()
1610 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI)) in selectDSOrderedIntrinsic()
1613 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI); in selectDSOrderedIntrinsic()
1648 const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI); in selectDSGWSIntrinsic()
1685 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI)) in selectDSGWSIntrinsic()
1691 if (!RBI.constrainGenericRegister(BaseOffset, in selectDSGWSIntrinsic()
1715 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI)) in selectDSGWSIntrinsic()
1749 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI)) in selectDSAppendConsume()
1757 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectDSAppendConsume()
2035 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectImageIntrinsic()
2064 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectDSBvhStackIntrinsic()
2129 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); in selectG_SELECT()
2149 Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); in selectG_SELECT()
2150 Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI); in selectG_SELECT()
2167 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); in selectG_SELECT()
2200 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); in selectG_TRUNC()
2207 DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_TRUNC()
2224 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || in selectG_TRUNC()
2225 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { in selectG_TRUNC()
2308 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) in selectG_TRUNC()
2336 return &RBI.getRegBankFromRegClass(*RC, LLT()); in getArtifactRegBank()
2366 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_SZA_EXT()
2379 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && in selectG_SZA_EXT()
2380 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); in selectG_SZA_EXT()
2394 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); in selectG_SZA_EXT()
2404 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); in selectG_SZA_EXT()
2410 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) in selectG_SZA_EXT()
2419 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); in selectG_SZA_EXT()
2442 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, in selectG_SZA_EXT()
2468 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); in selectG_SZA_EXT()
2484 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); in selectG_SZA_EXT()
2506 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); in selectG_FPEXT()
2519 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); in selectG_FPEXT()
2544 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_CONSTANT()
2555 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); in selectG_CONSTANT()
2569 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); in selectG_CONSTANT()
2606 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); in selectG_CONSTANT()
2622 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); in selectG_FNEG()
2632 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || in selectG_FNEG()
2633 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) in selectG_FNEG()
2668 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); in selectG_FABS()
2681 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || in selectG_FABS()
2682 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) in selectG_FABS()
2737 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI); in getAddrModeInfo()
2749 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID; in isSGPR()
2771 return RBI.getRegBank(MI.getOperand(0).getReg(), *MRI, TRI)->getID() == in isInstrUniform()
2886 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_GLOBAL_VALUE()
2892 return RBI.constrainGenericRegister( in selectG_GLOBAL_VALUE()
2905 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_PTRMASK()
2906 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); in selectG_PTRMASK()
2907 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI); in selectG_PTRMASK()
2928 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectG_PTRMASK()
2940 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || in selectG_PTRMASK()
2941 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || in selectG_PTRMASK()
2942 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI)) in selectG_PTRMASK()
3044 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectG_EXTRACT_VECTOR_ELT()
3045 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); in selectG_EXTRACT_VECTOR_ELT()
3046 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); in selectG_EXTRACT_VECTOR_ELT()
3059 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || in selectG_EXTRACT_VECTOR_ELT()
3060 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || in selectG_EXTRACT_VECTOR_ELT()
3061 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) in selectG_EXTRACT_VECTOR_ELT()
3124 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI); in selectG_INSERT_VECTOR_ELT()
3125 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI); in selectG_INSERT_VECTOR_ELT()
3126 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); in selectG_INSERT_VECTOR_ELT()
3140 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) || in selectG_INSERT_VECTOR_ELT()
3141 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) || in selectG_INSERT_VECTOR_ELT()
3142 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) || in selectG_INSERT_VECTOR_ELT()
3143 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) in selectG_INSERT_VECTOR_ELT()
3275 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectBufferLoadLds()
3377 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); in selectGlobalLoadLds()
3449 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); in selectWaveAddress()
3467 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) in selectWaveAddress()
3476 if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, *MRI)) in selectStackRestore()
3695 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) { in copyToVGPRIfSrcFolded()
4571 if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) in selectScratchSVAddr()
5104 const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI); in shouldUseAddr64()
5145 if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { in selectMUBUFAddr64Impl()
5147 if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { in selectMUBUFAddr64Impl()
5160 } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { in selectMUBUFAddr64Impl()
5495 if (!constrainSelectedInstRegOperands(*CopyMIB, TII, TRI, RBI)) in selectSBarrierSignalIsfirst()
5505 return RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32_XM0_XEXECRegClass, in selectSBarrierSignalIsfirst()
5583 constrainSelectedInstRegOperands(*CopyMIB, TII, TRI, RBI); in selectNamedBarrierInst()
5609 return RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32_XM0_XEXECRegClass, in selectSBarrierLeave()
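Nearly every match above follows one pattern: ask RBI which register bank regbankselect assigned to a generic virtual register, pick a matching SGPR or VGPR register class, and then either pin the register with RBI.constrainGenericRegister() or constrain the whole selected instruction with constrainSelectedInstRegOperands(). Below is a minimal sketch of that pattern, assuming the standard LLVM GlobalISel APIs and the AMDGPU register classes and bank IDs that appear in the listing; the helper names (selectSimpleBinOp, pinDstToBank) and the S_AND_B32 / V_AND_B32_e64 opcode pair are illustrative and not taken from AMDGPUInstructionSelector.cpp.

// Sketch only: assumes it lives inside the AMDGPU backend source tree, where
// the target-local headers below are visible.
#include "AMDGPURegisterBankInfo.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Select a generic binary op into an SALU or VALU instruction, then let the
// GlobalISel utility constrain every register operand against the MCInstrDesc
// (the same constrainSelectedInstRegOperands(I, TII, TRI, RBI) calls seen in
// selectG_AND_OR_XOR, selectG_ADD_SUB, etc. above).
static bool selectSimpleBinOp(MachineInstr &I, MachineRegisterInfo &MRI,
                              const SIInstrInfo &TII,
                              const SIRegisterInfo &TRI,
                              const AMDGPURegisterBankInfo &RBI) {
  Register DstReg = I.getOperand(0).getReg();

  // Which bank did regbankselect put the result in?
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;

  // Rewrite the generic opcode in place (opcode pair chosen for illustration).
  I.setDesc(TII.get(IsSALU ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e64));

  // Constrain all register operands to the classes the instruction demands.
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// COPY-like results (cf. the selectCOPY / selectG_TRUNC matches above) have no
// instruction description to constrain against, so pin the vreg's class by hand.
static bool pinDstToBank(Register DstReg, MachineRegisterInfo &MRI,
                         const SIRegisterInfo &TRI,
                         const AMDGPURegisterBankInfo &RBI) {
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, MRI, TRI);
  const TargetRegisterClass &RC = DstRB->getID() == AMDGPU::SGPRRegBankID
                                      ? AMDGPU::SReg_32RegClass
                                      : AMDGPU::VGPR_32RegClass;
  return RBI.constrainGenericRegister(DstReg, RC, MRI);
}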