ExecReg references under /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
SIOptimizeExecMaskingPreRA.cpp
     40  MCRegister ExecReg;   member in (anonymous namespace)::SIOptimizeExecMaskingPreRA
    140  if (CmpReg == Register(ExecReg)) {   in optimizeVcndVcmpPair()
    144  } else if (And->getOperand(2).getReg() != Register(ExecReg)) {   in optimizeVcndVcmpPair()
    204  .addReg(ExecReg)   in optimizeVcndVcmpPair()
    299  if (XorTermMI.getOperand(1).getReg() != Register(ExecReg))   in optimizeElseBranch()
    310  I->getOperand(1).getReg() == Register(ExecReg))   in optimizeElseBranch()
    323  for (MCRegUnit Unit : TRI->regunits(ExecReg)) {   in optimizeElseBranch()
    360  ExecReg = MCRegister::from(Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC);   in runOnMachineFunction()
    453  if (!(I->isFullCopy() && I->getOperand(1).getReg() == Register(ExecReg)))   in runOnMachineFunction()
    468  MRI->replaceRegWith(SavedExec, ExecReg);   in runOnMachineFunction()

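The match at line 360 shows the selection that most of the files below repeat: cache EXEC_LO on wave32 subtargets and the full 64-bit EXEC otherwise. A minimal sketch of that check, assuming only GCNSubtarget::isWave32() and the generated AMDGPU register enum; the selectExecReg() helper itself is illustrative, not part of the pass:

    // Sketch only: pick the exec mask register for the current wave size.
    #include "GCNSubtarget.h"
    #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
    #include "llvm/MC/MCRegister.h"

    static llvm::MCRegister selectExecReg(const llvm::GCNSubtarget &ST) {
      // Wave32 kernels use the 32-bit EXEC_LO half; wave64 uses the full EXEC pair.
      return llvm::MCRegister::from(ST.isWave32() ? llvm::AMDGPU::EXEC_LO
                                                  : llvm::AMDGPU::EXEC);
    }
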
SIPreEmitPeephole.cpp
     91  const unsigned ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;   local in optimizeVccBranch()
    103  if (A->modifiesRegister(ExecReg, TRI))   in optimizeVccBranch()
    118  if (Op1.getReg() != ExecReg && Op2.isReg() && Op2.getReg() == ExecReg) {   in optimizeVccBranch()
    122  if (Op1.getReg() != ExecReg)   in optimizeVccBranch()
    140  ModifiesExec |= M->modifiesRegister(ExecReg, TRI);   in optimizeVccBranch()
    182  .addReg(ExecReg);   in optimizeVccBranch()
    190  if (SReg == ExecReg) {   in optimizeVccBranch()

SILateBranchLowering.cpp
     40  Register ExecReg;   member in (anonymous namespace)::SILateBranchLowering
    125  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(MovOpc), ExecReg)   in expandChainCall()
    155  ExecReg = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;   in runOnMachineFunction()
    201  ExecReg)   in runOnMachineFunction()

SILowerI1Copies.cpp
    522  ExecReg = AMDGPU::EXEC_LO;   in PhiLoweringHelper()
    530  ExecReg = AMDGPU::EXEC;   in PhiLoweringHelper()
    867  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);   in buildMergeLaneMasks()
    870  .addReg(ExecReg)   in buildMergeLaneMasks()
    885  .addReg(ExecReg);   in buildMergeLaneMasks()
    896  .addReg(ExecReg);   in buildMergeLaneMasks()
    909  .addReg(ExecReg);   in buildMergeLaneMasks()
    913  .addReg(CurMaskedReg ? CurMaskedReg : ExecReg);   in buildMergeLaneMasks()

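buildMergeLaneMasks() combines the lane mask that was live on entry with the mask produced in the current block: inactive lanes keep the previous value, active lanes take the new one, i.e. Dst = (Prev & ~exec) | (exec & Cur). A minimal sketch of that merge, assuming wave32 opcodes and hypothetical virtual registers; the real helper also folds constant inputs and picks the 32- or 64-bit opcodes by wave size:

    // Sketch only: merge two lane masks under the current exec mask (wave32).
    #include "SIInstrInfo.h"
    #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"

    using namespace llvm;

    static void mergeLaneMasksWave32(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, const SIInstrInfo &TII,
                                     Register DstReg, Register PrevReg,
                                     Register CurReg, Register PrevMaskedReg,
                                     Register CurMaskedReg) {
      // PrevMaskedReg = PrevReg & ~EXEC_LO   (lanes that are currently inactive)
      BuildMI(MBB, I, DL, TII.get(AMDGPU::S_ANDN2_B32), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(AMDGPU::EXEC_LO);
      // CurMaskedReg = EXEC_LO & CurReg      (lanes that are currently active)
      BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), CurMaskedReg)
          .addReg(AMDGPU::EXEC_LO)
          .addReg(CurReg);
      // DstReg = PrevMaskedReg | CurMaskedReg
      BuildMI(MBB, I, DL, TII.get(AMDGPU::S_OR_B32), DstReg)
          .addReg(PrevMaskedReg)
          .addReg(CurMaskedReg);
    }

The GlobalISel path in AMDGPUGlobalISelDivergenceLowering.cpp (next entry) builds the same AndN2/And pair through MachineIRBuilder.
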
AMDGPUGlobalISelDivergenceLowering.cpp
    175  B.buildInstr(AndN2Op, {PrevMaskedReg}, {PrevRegCopy, ExecReg});   in buildMergeLaneMasks()
    176  B.buildInstr(AndOp, {CurMaskedReg}, {ExecReg, CurRegCopy});   in buildMergeLaneMasks()

SILowerI1Copies.h
     57  Register ExecReg;   variable

SIRegisterInfo.cpp
    108  Register ExecReg;   member
    130  ExecReg = AMDGPU::EXEC_LO;   in SGPRSpillBuilder()
    134  ExecReg = AMDGPU::EXEC;   in SGPRSpillBuilder()
    210  BuildMI(*MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg);   in prepare()
    212  BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes);   in prepare()
    229  auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);   in prepare()
    253  auto I = BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg)   in restore()
    264  auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);   in restore()
    303  auto Not0 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);   in readWriteTmpVGPR()
    306  auto Not1 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);   in readWriteTmpVGPR()

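The prepare()/restore() matches show how SGPRSpillBuilder brackets a spill: copy EXEC into scratch SGPRs, set EXEC to exactly the lanes the spill will touch, and put the original mask back afterwards. A minimal sketch, assuming a wave64 target and hypothetical SavedExecReg/VGPRLanes values; when no scratch SGPRs are available the real code instead flips EXEC with the NotOpc around the access (the matches at lines 229, 264, 303 and 306):

    // Sketch only: save EXEC, enable the lanes needed for the spill, restore it.
    #include "SIInstrInfo.h"
    #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"

    using namespace llvm;

    static void saveAndSetExecWave64(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     const DebugLoc &DL, const SIInstrInfo &TII,
                                     Register SavedExecReg, int64_t VGPRLanes) {
      // SavedExecReg = EXEC   (keep the live mask for later)
      BuildMI(MBB, MI, DL, TII.get(AMDGPU::S_MOV_B64), SavedExecReg)
          .addReg(AMDGPU::EXEC);
      // EXEC = VGPRLanes      (activate exactly the lanes used for the spill)
      BuildMI(MBB, MI, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
          .addImm(VGPRLanes);
    }

    static void restoreExecWave64(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  const DebugLoc &DL, const SIInstrInfo &TII,
                                  Register SavedExecReg) {
      // EXEC = SavedExecReg   (put the original mask back)
      BuildMI(MBB, MI, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
          .addReg(SavedExecReg, RegState::Kill);
    }
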
AMDGPURegisterBankInfo.cpp
    795  const unsigned ExecReg = Subtarget.isWave32() ?   local in executeInWaterfallLoop()
    954  .addDef(ExecReg)   in executeInWaterfallLoop()
    955  .addReg(ExecReg)   in executeInWaterfallLoop()
    966  .addReg(ExecReg);   in executeInWaterfallLoop()
    971  .addDef(ExecReg)   in executeInWaterfallLoop()

SIISelLowering.cpp
   4895  unsigned ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;   local in lowerWaveReduce()
   4902  BuildMI(BB, I, DL, TII->get(MovOpc), LoopIterator).addReg(ExecReg);   in lowerWaveReduce()