Lines Matching refs:constrainGenericRegister

119 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) && in constrainCopyLikeIntrin()
120 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI); in constrainCopyLikeIntrin()
139 return RBI.constrainGenericRegister(DstReg, *RC, *MRI); in selectCOPY()
144 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI)) in selectCOPY()
186 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) in selectCOPY()
200 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI); in selectCOPY()
239 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI); in selectPHI()
407 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) in selectG_ADD_SUB()
460 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) || in selectG_UADDO_USUBO_UADDE_USUBE()
461 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) || in selectG_UADDO_USUBO_UADDE_USUBE()
462 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI)) in selectG_UADDO_USUBO_UADDE_USUBE()
466 !RBI.constrainGenericRegister(I.getOperand(4).getReg(), in selectG_UADDO_USUBO_UADDE_USUBE()
514 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) in selectG_EXTRACT()
566 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI)) in selectG_MERGE_VALUES()
570 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) in selectG_MERGE_VALUES()
595 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) in selectG_UNMERGE_VALUES()
609 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) in selectG_UNMERGE_VALUES()
614 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI)) in selectG_UNMERGE_VALUES()
673 return RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI); in selectG_BUILD_VECTOR()
679 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); in selectG_BUILD_VECTOR()
695 return RBI.constrainGenericRegister(Dst, RC, *MRI) && in selectG_BUILD_VECTOR()
696 RBI.constrainGenericRegister(Src0, RC, *MRI); in selectG_BUILD_VECTOR()
778 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) { in selectG_IMPLICIT_DEF()
830 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || in selectG_INSERT()
831 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) || in selectG_INSERT()
832 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI)) in selectG_INSERT()
876 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) || in selectInterpP1F16()
877 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) || in selectInterpP1F16()
878 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI)) in selectInterpP1F16()
957 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI); in selectWritelane()
1320 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI); in selectG_ICMP_or_FCMP()
1336 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), in selectG_ICMP_or_FCMP()
1365 return RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI); in selectIntrinsicCmp()
1393 RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI); in selectIntrinsicCmp()
1454 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) in selectRelocConstant()
1513 !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) in selectReturnAddress()
1610 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI)) in selectDSOrderedIntrinsic()
1685 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI)) in selectDSGWSIntrinsic()
1691 if (!RBI.constrainGenericRegister(BaseOffset, in selectDSGWSIntrinsic()
1715 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI)) in selectDSGWSIntrinsic()
1749 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI)) in selectDSAppendConsume()
2224 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || in selectG_TRUNC()
2225 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { in selectG_TRUNC()
2308 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) in selectG_TRUNC()
2379 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && in selectG_SZA_EXT()
2380 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); in selectG_SZA_EXT()
2410 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) in selectG_SZA_EXT()
2419 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); in selectG_SZA_EXT()
2442 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, in selectG_SZA_EXT()
2468 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); in selectG_SZA_EXT()
2484 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); in selectG_SZA_EXT()
2519 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); in selectG_FPEXT()
2606 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); in selectG_CONSTANT()
2632 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || in selectG_FNEG()
2633 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) in selectG_FNEG()
2681 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || in selectG_FABS()
2682 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) in selectG_FABS()
2892 return RBI.constrainGenericRegister( in selectG_GLOBAL_VALUE()
2940 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || in selectG_PTRMASK()
2941 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || in selectG_PTRMASK()
2942 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI)) in selectG_PTRMASK()
3059 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || in selectG_EXTRACT_VECTOR_ELT()
3060 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || in selectG_EXTRACT_VECTOR_ELT()
3061 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) in selectG_EXTRACT_VECTOR_ELT()
3140 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) || in selectG_INSERT_VECTOR_ELT()
3141 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) || in selectG_INSERT_VECTOR_ELT()
3142 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) || in selectG_INSERT_VECTOR_ELT()
3143 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) in selectG_INSERT_VECTOR_ELT()
3467 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) in selectWaveAddress()
3476 if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, *MRI)) in selectStackRestore()
5505 return RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32_XM0_XEXECRegClass, in selectSBarrierSignalIsfirst()
5609 return RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32_XM0_XEXECRegClass, in selectSBarrierLeave()
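All of the matches above share one pattern: once the selector has built a real machine instruction for a generic opcode, every generic virtual register it touches is constrained to a concrete register class (frequently AMDGPU::SReg_32RegClass, AMDGPU::VGPR_32RegClass, or TRI.getBoolRC(), or a class derived from the operand's register bank), and the whole selection is rejected if any constraint fails. RegisterBankInfo::constrainGenericRegister returns the constrained TargetRegisterClass on success and nullptr on failure, which is why the call sites combine the results with && or early-return on a null result. The following is a minimal sketch of that pattern, not code from the file indexed above; constrainDefAndUse is a hypothetical helper, and the include paths assume a recent LLVM tree.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBankInfo.h" // llvm/CodeGen/GlobalISel/RegisterBankInfo.h in older trees
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Hypothetical helper: constrain the def and the first use operand of an
// already-selected instruction, mirroring the call sites listed above.
static bool constrainDefAndUse(const MachineInstr &MI,
                               const TargetRegisterClass &DstRC,
                               const TargetRegisterClass &SrcRC,
                               const RegisterBankInfo &RBI,
                               MachineRegisterInfo &MRI) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  // constrainGenericRegister() pins each register to the requested class
  // (or a compatible subclass if the register already has a class) and
  // returns the resulting class, or nullptr when no legal class exists.
  // Treating the pointer results as booleans means a single failed
  // constraint makes the helper return false and selection is abandoned.
  return RBI.constrainGenericRegister(DstReg, DstRC, MRI) &&
         RBI.constrainGenericRegister(SrcReg, SrcRC, MRI);
}

In the real selector the class is usually chosen from the operand's register bank (SGPR vs. VGPR) and the value's size, which is why so many of the matches above name AMDGPU::SReg_32RegClass or AMDGPU::VGPR_32RegClass directly.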