Lines matching refs:RI
All references to the SIRegisterInfo member RI in the AMDGPU SIInstrInfo implementation (SIInstrInfo.cpp), listed as source line number, matching line, and enclosing function; the "local" and "argument" tags mark lines where RI is declared as a local variable or parameter rather than used as the class member. An illustrative sketch of the recurring register-class dispatch idioms follows the listing.

65     RI(ST), ST(ST) {  in SIInstrInfo()
201 RI.isSGPRClass(MRI.getRegClass(Op.getReg()))) { in isSafeToSink()
642 const SIRegisterInfo &RI = TII.getRegisterInfo(); in indirectCopyToAGPR() local
652 if (!Def->modifiesRegister(SrcReg, &RI)) in indirectCopyToAGPR()
667 if (I->modifiesRegister(DefOp.getReg(), &RI)) in indirectCopyToAGPR()
696 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass, in indirectCopyToAGPR()
713 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs) in indirectCopyToAGPR()
746 const SIRegisterInfo &RI = TII.getRegisterInfo(); in expandSGPRCopy() local
747 ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4); in expandSGPRCopy()
753 Register DestSubReg = RI.getSubReg(DestReg, SubIdx); in expandSGPRCopy()
754 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx); in expandSGPRCopy()
763 unsigned Channel = RI.getChannelFromSubReg(SubIdx); in expandSGPRCopy()
764 SubIdx = RI.getSubRegFromChannel(Channel, 2); in expandSGPRCopy()
765 DestSubReg = RI.getSubReg(DestReg, SubIdx); in expandSGPRCopy()
766 SrcSubReg = RI.getSubReg(SrcReg, SubIdx); in expandSGPRCopy()
791 LastMI->addRegisterKilled(SrcReg, &RI); in expandSGPRCopy()
798 const TargetRegisterClass *RC = RI.getPhysRegBaseClass(DestReg); in copyPhysReg()
799 unsigned Size = RI.getRegSizeInBits(*RC); in copyPhysReg()
800 const TargetRegisterClass *SrcRC = RI.getPhysRegBaseClass(SrcReg); in copyPhysReg()
801 unsigned SrcSize = RI.getRegSizeInBits(*SrcRC); in copyPhysReg()
811 MCRegister SubReg = RI.getSubReg(RegToFix, AMDGPU::lo16); in copyPhysReg()
820 RC = RI.getPhysRegBaseClass(DestReg); in copyPhysReg()
821 Size = RI.getRegSizeInBits(*RC); in copyPhysReg()
822 SrcRC = RI.getPhysRegBaseClass(SrcReg); in copyPhysReg()
823 SrcSize = RI.getRegSizeInBits(*SrcRC); in copyPhysReg()
943 const bool Overlap = RI.regsOverlap(SrcReg, DestReg); in copyPhysReg()
957 bool DstLow = !AMDGPU::isHi(DestReg, RI); in copyPhysReg()
958 bool SrcLow = !AMDGPU::isHi(SrcReg, RI); in copyPhysReg()
959 MCRegister NewDestReg = RI.get32BitRegister(DestReg); in copyPhysReg()
960 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg); in copyPhysReg()
1028 if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) { in copyPhysReg()
1050 const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg); in copyPhysReg()
1051 if (RI.isSGPRClass(RC)) { in copyPhysReg()
1052 if (!RI.isSGPRClass(SrcRC)) { in copyPhysReg()
1056 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg); in copyPhysReg()
1064 if (RI.isAGPRClass(RC)) { in copyPhysReg()
1065 if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC)) in copyPhysReg()
1067 else if (RI.hasVGPRs(SrcRC) || in copyPhysReg()
1068 (ST.hasGFX90AInsts() && RI.isSGPRClass(SrcRC))) in copyPhysReg()
1072 } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) { in copyPhysReg()
1074 } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) && in copyPhysReg()
1075 (RI.isProperlyAlignedRC(*RC) && in copyPhysReg()
1076 (SrcRC == RC || RI.isSGPRClass(SrcRC)))) { in copyPhysReg()
1096 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); in copyPhysReg()
1100 const bool Overlap = RI.regsOverlap(SrcReg, DestReg); in copyPhysReg()
1109 Register DestSubReg = RI.getSubReg(DestReg, SubIdx); in copyPhysReg()
1110 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx); in copyPhysReg()
1201 if (RI.isSGPRClass(RegClass)) { in materializeImmediate()
1202 if (RI.getRegSizeInBits(*RegClass) > 32) { in materializeImmediate()
1211 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize); in materializeImmediate()
1216 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx])); in materializeImmediate()
1234 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); in insertVectorSelect()
1309 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); in insertVectorSelect()
1327 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); in insertVectorSelect()
1357 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); in insertEQ()
1370 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); in insertNE()
1380 if (RI.isAGPRClass(DstRC)) in getMovOpcode()
1382 if (RI.getRegSizeInBits(*DstRC) == 16) { in getMovOpcode()
1385 return RI.isSGPRClass(DstRC) ? AMDGPU::COPY : AMDGPU::V_MOV_B16_t16_e64; in getMovOpcode()
1387 if (RI.getRegSizeInBits(*DstRC) == 32) in getMovOpcode()
1388 return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; in getMovOpcode()
1389 if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) in getMovOpcode()
1391 if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) in getMovOpcode()
1734 if (RI.isSGPRClass(RC)) { in storeRegToStackSlot()
1756 if (RI.spillSGPRToVGPR()) in storeRegToStackSlot()
1762 SpillSize, RI, *MFI); in storeRegToStackSlot()
1961 if (RI.isSGPRClass(RC)) { in loadRegFromStackSlot()
1975 if (RI.spillSGPRToVGPR()) in loadRegFromStackSlot()
1986 SpillSize, RI, *MFI); in loadRegFromStackSlot()
2186 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); in expandPostRAPseudo()
2187 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); in expandPostRAPseudo()
2224 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) { in expandPostRAPseudo()
2237 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) in expandPostRAPseudo()
2240 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) in expandPostRAPseudo()
2261 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); in expandPostRAPseudo()
2262 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); in expandPostRAPseudo()
2341 if (RI.hasVGPRs(EltRC)) { in expandPostRAPseudo()
2344 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64 in expandPostRAPseudo()
2356 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) in expandPostRAPseudo()
2394 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) in expandPostRAPseudo()
2438 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) in expandPostRAPseudo()
2451 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); in expandPostRAPseudo()
2452 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); in expandPostRAPseudo()
2548 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); in expandPostRAPseudo()
2563 const TargetRegisterInfo &RI) const { in reMaterialize()
2596 unsigned Offset = RI.getSubRegIdxOffset(UseMO->getSubReg()); in reMaterialize()
2597 unsigned SubregSize = RI.getSubRegIdxSize(UseMO->getSubReg()); in reMaterialize()
2613 RI.getAllocatableClass(getRegClass(TID, 0, &RI, *MF)); in reMaterialize()
2643 TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig, RI); in reMaterialize()
2668 MovDPP.addDef(RI.getSubReg(Dst, Sub)); in expandMovDPP64()
2686 MovDPP.addReg(RI.getSubReg(Src, Sub)); in expandMovDPP64()
3238 return RI.hasVGPRs(RC) && NumInsts <= 6; in canInsertSelect()
3256 return RI.isSGPRClass(RC); in canInsertSelect()
3275 unsigned DstSize = RI.getRegSizeInBits(*DstRC); in insertSelect()
3459 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); in foldImmediate()
3466 if (RI.isAGPR(*MRI, DstReg)) { in foldImmediate()
3481 DstReg = RI.get32BitRegister(DstReg); in foldImmediate()
3489 !RI.getRegClass(NewMCID.operands()[0].RegClass)->contains(DstReg)) in foldImmediate()
3533 if (RI.isSGPRClass(MRI->getRegClass(RegSrc->getReg())) && in foldImmediate()
3537 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) in foldImmediate()
3613 RI.isSGPRReg(*MRI, Src0->getReg())) { in foldImmediate()
3626 else if (RI.isSGPRReg(*MRI, Src1->getReg())) in foldImmediate()
3705 Dummy0, &RI) || in checkInstOffsetsDoNotOverlap()
3707 Dummy1, &RI)) in checkInstOffsetsDoNotOverlap()
3946 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) { in convertToThreeAddress()
4101 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || in isSchedulingBoundary()
4172 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) in mayReadEXEC()
4176 return MI.readsRegister(AMDGPU::EXEC, &RI); in mayReadEXEC()
4187 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); in mayReadEXEC()
4373 return RI.opCanUseInlineConstant(OpInfo.OperandType); in isImmOperandLegal()
4376 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) in isImmOperandLegal()
4428 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) in canShrink()
4441 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || in canShrink()
4452 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || in canShrink()
4543 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); in usesConstantBus()
4645 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); in verifyInstruction()
4739 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg); in verifyInstruction()
4740 if (RI.hasVectorRegisters(RC) && MO.getSubReg()) { in verifyInstruction()
4742 RI.getSubRegisterClass(RC, MO.getSubReg()); in verifyInstruction()
4743 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg()); in verifyInstruction()
4749 if (!RC || !RI.isProperlyAlignedRC(*RC)) { in verifyInstruction()
4759 const TargetRegisterClass *RC = RI.getRegClass(RegClass); in verifyInstruction()
4783 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { in verifyInstruction()
4896 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; in verifyInstruction()
4954 return !RI.regsOverlap(SGPRUsed, SGPR); in verifyInstruction()
5099 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { in verifyInstruction()
5262 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) { in verifyInstruction()
5268 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) { in verifyInstruction()
5274 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) || in verifyInstruction()
5275 (Data && RI.isAGPR(MRI, Data->getReg())) || in verifyInstruction()
5276 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) { in verifyInstruction()
5291 return !(RI.getHWRegIndex(Reg) & 1); in verifyInstruction()
5293 return RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) && in verifyInstruction()
5294 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1); in verifyInstruction()
5320 if (Src->isReg() && RI.isSGPRReg(MRI, Src->getReg())) { in verifyInstruction()
5355 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? in getVALUOp()
5548 adjustAllocatableRegClass(const GCNSubtarget &ST, const SIRegisterInfo &RI, in adjustAllocatableRegClass() argument
5580 return RI.getProperlyAlignedRC(RI.getRegClass(RCID)); in adjustAllocatableRegClass()
5609 return adjustAllocatableRegClass(ST, RI, MF.getRegInfo(), TID, RegClass, in getRegClass()
5623 return RI.getPhysRegBaseClass(Reg); in getOpRegClass()
5627 return adjustAllocatableRegClass(ST, RI, MRI, Desc, RCID, true); in getOpRegClass()
5636 const TargetRegisterClass *RC = RI.getRegClass(RCID); in legalizeOpWithMove()
5637 unsigned Size = RI.getRegSizeInBits(*RC); in legalizeOpWithMove()
5641 else if (RI.isSGPRClass(RC)) in legalizeOpWithMove()
5644 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); in legalizeOpWithMove()
5659 unsigned NewSubIdx = RI.composeSubRegIndices(SuperReg.getSubReg(), SubIdx); in buildExtractSubReg()
5699 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); in isLegalRegOperand()
5707 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); in isLegalRegOperand()
5711 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); in isLegalRegOperand()
5736 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; in isOperandLegal()
5782 bool IsAGPR = RI.isAGPR(MRI, MO->getReg()); in isOperandLegal()
5796 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR) in isOperandLegal()
5800 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR) in isOperandLegal()
5806 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR) in isOperandLegal()
5811 RI.isSGPRReg(MRI, MO->getReg())) in isOperandLegal()
5863 RI.isSGPRReg(MRI, Src0.getReg())) in legalizeOperandsVOP2()
5871 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { in legalizeOperandsVOP2()
5877 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { in legalizeOperandsVOP2()
5888 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) in legalizeOperandsVOP2()
5891 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) in legalizeOperandsVOP2()
5897 if (!RI.isVGPR(MRI, MI.getOperand(Src2Idx).getReg())) in legalizeOperandsVOP2()
5910 RI.isVGPR(MRI, Src1.getReg())) { in legalizeOperandsVOP2()
5982 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { in legalizeOperandsVOP3()
5988 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { in legalizeOperandsVOP3()
6027 if (RI.hasAGPRs(RI.getRegClassForReg(MRI, MO.getReg())) && in legalizeOperandsVOP3()
6033 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg()))) in legalizeOperandsVOP3()
6053 !RI.isVGPR(MRI, MI.getOperand(VOP3Idx[2]).getReg())) in legalizeOperandsVOP3()
6060 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); in readlaneVGPRToSGPR()
6062 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; in readlaneVGPRToSGPR()
6064 if (RI.hasAGPRs(VRC)) { in readlaneVGPRToSGPR()
6065 VRC = RI.getEquivalentVGPRClass(VRC); in readlaneVGPRToSGPR()
6085 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); in readlaneVGPRToSGPR()
6094 MIB.addImm(RI.getSubRegFromChannel(i)); in readlaneVGPRToSGPR()
6107 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { in legalizeOperandsSMRD()
6112 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) { in legalizeOperandsSMRD()
6134 if (RI.isSGPRReg(MRI, SAddr.getReg())) in moveFlatAddrToVGPR()
6207 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg()))) in legalizeOperandsFLAT()
6226 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( in legalizeGenericOperand()
6227 RI.getRegClassForReg(MRI, OpReg), OpSubReg); in legalizeGenericOperand()
6254 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && in legalizeGenericOperand()
6594 if (RI.hasVectorRegisters(OpRC)) { in legalizeOperands()
6604 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) { in legalizeOperands()
6610 VRC = RI.isAGPRClass(getOpRegClass(MI, 0)) in legalizeOperands()
6611 ? RI.getEquivalentAGPRClass(SRC) in legalizeOperands()
6612 : RI.getEquivalentVGPRClass(SRC); in legalizeOperands()
6614 VRC = RI.isAGPRClass(getOpRegClass(MI, 0)) in legalizeOperands()
6615 ? RI.getEquivalentAGPRClass(VRC) in legalizeOperands()
6616 : RI.getEquivalentVGPRClass(VRC); in legalizeOperands()
6645 if (RI.hasVGPRs(DstRC)) { in legalizeOperands()
6655 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); in legalizeOperands()
6685 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) in legalizeOperands()
6699 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) in legalizeOperands()
6714 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) in legalizeOperands()
6719 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) in legalizeOperands()
6728 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) { in legalizeOperands()
6773 !RI.isSGPRClass(MRI.getRegClass(Soffset->getReg()))) { in legalizeOperands()
6783 if (Rsrc->isReg() && !RI.isSGPRClass(MRI.getRegClass(Rsrc->getReg()))) { in legalizeOperands()
6817 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); in legalizeOperands()
7140 Register VCC = RI.getVCC(); in moveToVALUImpl()
7195 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); in moveToVALUImpl()
7206 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( in moveToVALUImpl()
7233 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); in moveToVALUImpl()
7296 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass()); in moveToVALUImpl()
7375 RI.isVGPR(MRI, Inst.getOperand(1).getReg())) { in moveToVALUImpl()
7385 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { in moveToVALUImpl()
7423 Src.isReg() && RI.isVGPR(MRI, Src.getReg())) in moveToVALUImpl()
7571 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); in lowerSelect()
7580 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) != in lowerSelect()
7604 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest.getReg()))); in lowerSelect()
7680 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); in lowerScalarXnor()
7682 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); in lowerScalarXnor()
7793 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0); in splitScalar64BitUnaryOp()
7799 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); in splitScalar64BitUnaryOp()
7801 RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0); in splitScalar64BitUnaryOp()
7855 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0); in splitScalarSMulU64()
7856 if (RI.isSGPRClass(Src0SubRC)) in splitScalarSMulU64()
7857 Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC); in splitScalarSMulU64()
7859 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0); in splitScalarSMulU64()
7860 if (RI.isSGPRClass(Src1SubRC)) in splitScalarSMulU64()
7861 Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC); in splitScalarSMulU64()
7964 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0); in splitScalarSMulPseudo()
7965 if (RI.isSGPRClass(Src0SubRC)) in splitScalarSMulPseudo()
7966 Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC); in splitScalarSMulPseudo()
7968 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0); in splitScalarSMulPseudo()
7969 if (RI.isSGPRClass(Src1SubRC)) in splitScalarSMulPseudo()
7970 Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC); in splitScalarSMulPseudo()
8027 RI.getSubRegisterClass(Src0RC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
8033 RI.getSubRegisterClass(Src1RC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
8045 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); in splitScalar64BitBinaryOp()
8047 RI.getSubRegisterClass(NewDestRC, AMDGPU::sub0); in splitScalar64BitBinaryOp()
8095 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { in splitScalar64BitXnor()
8137 RI.getSubRegisterClass(SrcRC, AMDGPU::sub0); in splitScalar64BitBCNT()
8241 RI.getSubRegisterClass(SrcRC, AMDGPU::sub0); in splitScalar64BitCountOp()
8295 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { in addUsersToMoveToVALUWorklist()
8395 int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, &RI, false); in addSCCDefUsersToVALUWorklist()
8412 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, &RI, false, false) != -1) in addSCCDefUsersToVALUWorklist()
8433 if (MI.modifiesRegister(AMDGPU::VCC, &RI)) in addSCCDefsToVALUWorklist()
8435 if (MI.definesRegister(AMDGPU::SCC, &RI)) { in addSCCDefsToVALUWorklist()
8459 if (RI.isAGPRClass(SrcRC)) { in getDestEquivalentVGPRClass()
8460 if (RI.isAGPRClass(NewDstRC)) in getDestEquivalentVGPRClass()
8467 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC); in getDestEquivalentVGPRClass()
8470 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); in getDestEquivalentVGPRClass()
8476 if (RI.isVGPRClass(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass) in getDestEquivalentVGPRClass()
8479 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); in getDestEquivalentVGPRClass()
8524 RI.getRegClass(Desc.operands()[Idx].RegClass); in findUsedSGPR()
8525 bool IsRequiredSGPR = RI.isSGPRClass(OpRC); in findUsedSGPR()
8532 if (RI.isSGPRClass(RegRC)) in findUsedSGPR()
8781 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); in convertNonUniformIfRegion()
8808 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); in convertNonUniformLoopRegion()
8809 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); in convertNonUniformLoopRegion()
8816 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); in convertNonUniformLoopRegion()
8932 IsNullOrVectorRegister = !RI.isSGPRClass(RI.getRegClassForReg(MRI, Reg)); in isBasicBlockPrologue()
8940 MI.modifiesRegister(AMDGPU::EXEC, &RI))); in isBasicBlockPrologue()
8952 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); in getAddNoCarry()
8953 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); in getAddNoCarry()
8969 ? Register(RI.getVCC()) in getAddNoCarry()
8971 *RI.getBoolRC(), I, /* RestoreAfter */ false, in getAddNoCarry()
9037 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); in isBufferSMRD()
9616 RI.getPhysRegBaseClass(srcOp.getReg()); in getInstructionUniformity()
9617 return RI.isSGPRClass(regClass) ? InstructionUniformity::AlwaysUniform in getInstructionUniformity()
9670 const RegisterBank *RegBank = RBI->getRegBank(Reg, MRI, RI); in getInstructionUniformity()
9849 if (I->modifiesRegister(AMDGPU::SCC, &RI) || in optimizeCompareInstr()
9850 I->killsRegister(AMDGPU::SCC, &RI)) in optimizeCompareInstr()
9931 bool IsAGPR = RI.isAGPR(MRI, DataReg); in enforceOperandRCAlignment()
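
The references above cluster into a few recurring idioms: querying RI for register-class properties (isSGPRClass, isAGPRClass, hasVGPRs, getRegSizeInBits) to pick an opcode or legalize an operand, and splitting wide registers into 32-bit pieces (getRegSplitParts, getSubReg) to expand copies and immediate materialization. The two fragments below are minimal illustrative sketches of those idioms, not the LLVM source: the helper names pickMovOpcode and splitWideCopy are hypothetical, the AGPR and 64-bit VGPR move cases are elided, and the includes assume the code is built inside the AMDGPU backend so that SIRegisterInfo.h and the generated AMDGPU opcode enums are available. The SIRegisterInfo calls themselves are the ones that appear in the listing.

// Assumed context: compiled inside the AMDGPU backend, with these headers
// and the generated AMDGPU opcode enums on the include path.
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
using namespace llvm;

// Sketch of the class/size dispatch visible in the getMovOpcode() lines
// (1380-1391 above); a hypothetical helper, not the LLVM implementation.
static unsigned pickMovOpcode(const SIRegisterInfo &RI,
                              const TargetRegisterClass *DstRC) {
  // 16-bit destinations: SGPRs fall back to a plain COPY, VGPRs use the
  // true16 move (line 1385).
  if (RI.getRegSizeInBits(*DstRC) == 16)
    return RI.isSGPRClass(DstRC) ? AMDGPU::COPY : AMDGPU::V_MOV_B16_t16_e64;
  // 32-bit destinations: scalar vs. vector move (line 1388).
  if (RI.getRegSizeInBits(*DstRC) == 32)
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  // 64-bit SGPR destinations (line 1389); the AGPR (line 1380) and 64-bit
  // VGPR (line 1391) cases return opcodes omitted from this sketch.
  if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC))
    return AMDGPU::S_MOV_B64;
  return AMDGPU::COPY; // conservative fallback, for the sketch only
}

// Sketch of the sub-register splitting idiom used by expandSGPRCopy()
// (lines 746-766) and the wide-copy path of copyPhysReg() (lines 1096-1110).
// An EltSize of 4 bytes matches the 32-bit split requested at line 747; the
// per-element move emission is elided.
static void splitWideCopy(const SIRegisterInfo &RI,
                          const TargetRegisterClass *RC,
                          MCRegister DestReg, MCRegister SrcReg) {
  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, /*EltSize=*/4);
  for (int16_t SubIdx : SubIndices) {
    MCRegister DestSubReg = RI.getSubReg(DestReg, SubIdx);
    MCRegister SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
    // ...emit one 32-bit move from SrcSubReg to DestSubReg here...
    (void)DestSubReg;
    (void)SrcSubReg;
  }
}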