Lines Matching refs:RS

Every hit below is a use of the RegScavenger pointer RS in the SGPR-spilling code of LLVM's AMDGPU backend (SIRegisterInfo.cpp). The number at the start of each hit is its source line; the trailing annotation names the enclosing function, or marks a declaration as a member or an argument.

101 RegScavenger *RS; member
114 RegScavenger *RS) in SGPRSpillBuilder()
116 MI->getOperand(0).isKill(), Index, RS) {} in SGPRSpillBuilder()
120 bool IsKill, int Index, RegScavenger *RS) in SGPRSpillBuilder()
122 Index(Index), RS(RS), MBB(MI->getParent()), MF(*MBB->getParent()), in SGPRSpillBuilder()
172 assert(RS && "Cannot spill SGPR to memory without RegScavenger"); in prepare()
173 TmpVGPR = RS->scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, MI, false, in prepare()
191 RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR); in prepare()
196 RS->setRegUsed(TmpVGPR); in prepare()
202 RS->setRegUsed(SuperReg); in prepare()
203 SavedExecReg = RS->scavengeRegisterBackwards(RC, MI, false, 0, false); in prepare()
208 RS->setRegUsed(SavedExecReg); in prepare()
221 if (RS->isRegUsed(AMDGPU::SCC)) in prepare()
277 RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR, &*RestorePt); in restore()
296 if (RS->isRegUsed(AMDGPU::SCC)) in readWriteTmpVGPR()
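
Taken together, the prepare()/restore()/readWriteTmpVGPR() hits above (source lines 172-296) trace one pattern: scavenge a temporary VGPR backwards from the spill point, pin it so it cannot be handed out twice, bind it to its emergency stack slot, and consult SCC liveness before touching the exec mask. Below is a minimal sketch of that pattern, assuming it is compiled inside the AMDGPU backend; the helper name prepareTmpVGPR is hypothetical, while the RegScavenger calls are the real API.

    // Hedged sketch of the scavenging steps seen in prepare() above; the
    // real call at source line 173 is truncated in the listing, so the
    // flag values here are illustrative. Builds only inside the AMDGPU
    // backend, where the AMDGPU:: names are defined.
    #include "SIRegisterInfo.h"                  // AMDGPU register classes
    #include "llvm/CodeGen/RegisterScavenging.h" // RegScavenger
    #include <cassert>

    using namespace llvm;

    static Register prepareTmpVGPR(RegScavenger *RS,
                                   MachineBasicBlock::iterator MI,
                                   int TmpVGPRIndex) {
      assert(RS && "Cannot spill SGPR to memory without RegScavenger");
      // Scan backwards from MI for a free 32-bit VGPR, without spilling
      // another register to make room.
      Register TmpVGPR = RS->scavengeRegisterBackwards(
          AMDGPU::VGPR_32RegClass, MI, /*RestoreAfter=*/false, /*SPAdj=*/0,
          /*AllowSpill=*/false);
      // Bind the scavenged VGPR to its emergency slot (source line 191).
      RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR);
      // Pin it so later scavenges cannot hand it out again (line 196).
      RS->setRegUsed(TmpVGPR);
      return TmpVGPR;
    }

restore() (source line 277) re-runs assignRegToScavengingIndex() with an explicit restore point, and both prepare() and readWriteTmpVGPR() check RS->isRegUsed(AMDGPU::SCC) first (source lines 221, 296) so that exec-mask arithmetic does not clobber a live SCC.
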
1333 RegScavenger *RS, LiveRegUnits *LiveUnits) const { in buildSpillLoadStore() argument
1334 assert((!RS || !LiveUnits) && "Only RS or LiveUnits can be set but not both"); in buildSpillLoadStore()
1425 if (RS) { in buildSpillLoadStore()
1426 SOffset = RS->scavengeRegisterBackwards(AMDGPU::SGPR_32RegClass, MI, false, 0, false); in buildSpillLoadStore()
1429 CanClobberSCC = !RS->isRegUsed(AMDGPU::SCC); in buildSpillLoadStore()
1446 if (RS) { in buildSpillLoadStore()
1447 TmpOffsetVGPR = RS->scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, MI, false, 0); in buildSpillLoadStore()
1632 TmpOffsetVGPR = RS->scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, in buildSpillLoadStore()
1634 RS->setRegUsed(TmpOffsetVGPR); in buildSpillLoadStore()
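
buildSpillLoadStore() can be driven either by a RegScavenger or by a precomputed LiveRegUnits set, never both (the assertion at source line 1334). Below is a hedged sketch of how a query such as the SCC-clobber check at source line 1429 dispatches over the two; the helper name is hypothetical, while isRegUsed() and available() are the real APIs.

    // Hedged sketch of the RS-or-LiveUnits duality; as above, this only
    // builds inside the AMDGPU backend.
    #include "SIRegisterInfo.h"
    #include "llvm/CodeGen/LiveRegUnits.h"
    #include "llvm/CodeGen/RegisterScavenging.h"
    #include <cassert>

    using namespace llvm;

    static bool canClobberSCC(const RegScavenger *RS,
                              const LiveRegUnits *LiveUnits) {
      assert((!RS || !LiveUnits) &&
             "Only RS or LiveUnits can be set but not both");
      if (RS)
        return !RS->isRegUsed(AMDGPU::SCC); // scavenger: SCC is dead here
      if (LiveUnits)
        return LiveUnits->available(AMDGPU::SCC); // SCC not live here
      return false; // no liveness info: conservatively preserve SCC
    }
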
1749 FrameReg, (int64_t)Offset * SB.EltSize, MMO, SB.RS); in buildVGPRSpillLoadStore()
1754 FrameReg, (int64_t)Offset * SB.EltSize, MMO, SB.RS); in buildVGPRSpillLoadStore()
1761 RegScavenger *RS, SlotIndexes *Indexes, in spillSGPR() argument
1764 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS); in spillSGPR()
1884 RegScavenger *RS, SlotIndexes *Indexes, in restoreSGPR() argument
1887 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS); in restoreSGPR()
1965 Register SGPR, RegScavenger *RS) const { in spillEmergencySGPR()
1967 RS); in spillEmergencySGPR()
2036 MachineBasicBlock::iterator MI, int FI, RegScavenger *RS, in eliminateSGPRToVGPRSpillFrameIndex() argument
2053 return spillSGPR(MI, FI, RS, Indexes, LIS, true, SpillToPhysVGPRLane); in eliminateSGPRToVGPRSpillFrameIndex()
2068 return restoreSGPR(MI, FI, RS, Indexes, LIS, true, SpillToPhysVGPRLane); in eliminateSGPRToVGPRSpillFrameIndex()
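
spillSGPR() and restoreSGPR() (source lines 1764, 1887) do little with RS themselves beyond forwarding it into an SGPRSpillBuilder; eliminateSGPRToVGPRSpillFrameIndex() chooses between them by spill-pseudo opcode before handing RS down (source lines 2053, 2068). Below is a hedged sketch of that dispatch, showing only the 32-bit pseudos where the real switch covers every SI_SPILL_S*_SAVE/RESTORE width; the free function and its argument list are illustrative, not the real signature.

    // Hedged sketch of the opcode dispatch around source lines 2053/2068.
    #include "SIRegisterInfo.h"
    #include "llvm/CodeGen/RegisterScavenging.h"

    using namespace llvm;

    static bool dispatchSGPRSpillPseudo(const SIRegisterInfo &TRI,
                                        MachineBasicBlock::iterator MI,
                                        int FI, RegScavenger *RS) {
      switch (MI->getOpcode()) {
      case AMDGPU::SI_SPILL_S32_SAVE:
        // Spill path: lower the SAVE pseudo into VGPR-lane writes.
        return TRI.spillSGPR(MI, FI, RS, /*Indexes=*/nullptr,
                             /*LIS=*/nullptr, /*OnlyToVGPR=*/true);
      case AMDGPU::SI_SPILL_S32_RESTORE:
        // Restore path: read the lanes back into the SGPR.
        return TRI.restoreSGPR(MI, FI, RS, /*Indexes=*/nullptr,
                               /*LIS=*/nullptr, /*OnlyToVGPR=*/true);
      default:
        return false; // not an SGPR spill pseudo
      }
    }
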
2076 RegScavenger *RS) const { in eliminateFrameIndex()
2112 return spillSGPR(MI, Index, RS); in eliminateFrameIndex()
2130 return restoreSGPR(MI, Index, RS); in eliminateFrameIndex()
2189 RS->isRegUsed(AMDGPU::SCC)); in eliminateFrameIndex()
2194 *MI->memoperands_begin(), RS); in eliminateFrameIndex()
2257 RS->isRegUsed(AMDGPU::SCC)); in eliminateFrameIndex()
2263 *MI->memoperands_begin(), RS); in eliminateFrameIndex()
2357 RS->scavengeRegisterBackwards(*RC, MI, false, 0, !UseSGPR); in eliminateFrameIndex()
2372 bool NeedSaveSCC = RS->isRegUsed(AMDGPU::SCC) && in eliminateFrameIndex()
2377 : RS->scavengeRegisterBackwards(AMDGPU::SReg_32_XM0RegClass, in eliminateFrameIndex()
2446 bool LiveSCC = RS->isRegUsed(AMDGPU::SCC) && in eliminateFrameIndex()
2455 : RS->scavengeRegisterBackwards(*RC, MI, false, 0); in eliminateFrameIndex()
2471 Register NewDest = RS->scavengeRegisterBackwards( in eliminateFrameIndex()
2481 if ((MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) != in eliminateFrameIndex()
2526 Register TmpScaledReg = RS->scavengeRegisterBackwards( in eliminateFrameIndex()
2592 Register TmpReg = RS->scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, in eliminateFrameIndex()
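
The remaining eliminateFrameIndex() hits repeat one idiom (e.g. source lines 2372-2377 and 2446-2455): check whether SCC is live before emitting carry-writing frame-offset arithmetic, and scavenge a scratch SGPR only when no existing dead definition can be reused. A condensed, hedged sketch of that idiom follows; the helper and its DeadDef parameter are hypothetical.

    // Condensed sketch of the scavenge-or-reuse idiom at source lines
    // 2372-2377; builds only inside the AMDGPU backend.
    #include "SIRegisterInfo.h"
    #include "llvm/CodeGen/RegisterScavenging.h"

    using namespace llvm;

    static Register pickScratchSGPR(RegScavenger *RS,
                                    MachineBasicBlock::iterator MI,
                                    Register DeadDef, bool AddWritesSCC) {
      // S_ADD_I32 and friends write SCC, so a live SCC needs extra care
      // (SCC liveness is also queried at source lines 2189 and 2257).
      bool NeedSaveSCC = RS->isRegUsed(AMDGPU::SCC) && AddWritesSCC;
      if (!NeedSaveSCC && DeadDef.isValid())
        return DeadDef; // reuse the instruction's own dead destination
      // Otherwise scavenge a fresh SGPR; the XM0 class excludes M0, which
      // AMDGPU reserves for special addressing state.
      return RS->scavengeRegisterBackwards(AMDGPU::SReg_32_XM0RegClass, MI,
                                           /*RestoreAfter=*/false,
                                           /*SPAdj=*/0, /*AllowSpill=*/false);
    }
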