//===- AMDGPUUnifyDivergentExitNodes.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a variant of the UnifyDivergentExitNodes pass. Rather than ensuring
// there is at most one ret and one unreachable instruction, it ensures there is
// at most one divergent exiting block.
//
// StructurizeCFG can't deal with multi-exit regions formed by branches to
// multiple return nodes. It is not desirable to structurize regions with
// uniform branches, so unifying those to the same return block as divergent
// branches inhibits use of scalar branching. It still can't deal with the case
// where one branch goes to return, and one unreachable. Replace unreachable in
// this case with a return.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "SIDefines.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-unify-divergent-exit-nodes"

namespace {

class AMDGPUUnifyDivergentExitNodes
    : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AMDGPUUnifyDivergentExitNodes() : FunctionPass(ID) {
    initializeAMDGPUUnifyDivergentExitNodesPass(
        *PassRegistry::getPassRegistry());
  }

  // We can preserve non-critical-edgeness when we unify function exit nodes
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char AMDGPUUnifyDivergentExitNodes::ID = 0;

char &llvm::AMDGPUUnifyDivergentExitNodesID = AMDGPUUnifyDivergentExitNodes::ID;

INITIALIZE_PASS_BEGIN(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                      "Unify divergent function exit nodes", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                    "Unify divergent function exit nodes", false, false)

void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const {
  if (RequireAndPreserveDomTree)
    AU.addRequired<DominatorTreeWrapperPass>();

  AU.addRequired<PostDominatorTreeWrapperPass>();

  AU.addRequired<LegacyDivergenceAnalysis>();

  if (RequireAndPreserveDomTree) {
    AU.addPreserved<DominatorTreeWrapperPass>();
    // FIXME: preserve PostDominatorTreeWrapperPass
  }

  // No divergent values are changed, only blocks and branch edges.
  AU.addPreserved<LegacyDivergenceAnalysis>();

  // We preserve the non-critical-edgeness property
  AU.addPreservedID(BreakCriticalEdgesID);

  // This is a cluster of orthogonal Transforms
  AU.addPreservedID(LowerSwitchID);
  FunctionPass::getAnalysisUsage(AU);

  AU.addRequired<TargetTransformInfoWrapperPass>();
}

/// \returns true if \p BB is reachable through only uniform branches.
/// XXX - Is there a more efficient way to find this?
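/// This is implemented as a worklist-based depth-first walk backwards over
/// the predecessors of \p BB: the block counts as uniformly reached only if
/// the terminator of every transitive predecessor is uniform according to
/// the divergence analysis.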
static bool isUniformlyReached(const LegacyDivergenceAnalysis &DA,
                               BasicBlock &BB) {
  SmallVector<BasicBlock *, 8> Stack;
  SmallPtrSet<BasicBlock *, 8> Visited;

  for (BasicBlock *Pred : predecessors(&BB))
    Stack.push_back(Pred);

  while (!Stack.empty()) {
    BasicBlock *Top = Stack.pop_back_val();
    if (!DA.isUniform(Top->getTerminator()))
      return false;

    for (BasicBlock *Pred : predecessors(Top)) {
      if (Visited.insert(Pred).second)
        Stack.push_back(Pred);
    }
  }

  return true;
}

static void removeDoneExport(Function &F) {
  ConstantInt *BoolFalse = ConstantInt::getFalse(F.getContext());
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (IntrinsicInst *Intrin = dyn_cast<IntrinsicInst>(&I)) {
        if (Intrin->getIntrinsicID() == Intrinsic::amdgcn_exp) {
          Intrin->setArgOperand(6, BoolFalse); // done
        } else if (Intrin->getIntrinsicID() == Intrinsic::amdgcn_exp_compr) {
          Intrin->setArgOperand(4, BoolFalse); // done
        }
      }
    }
  }
}

static BasicBlock *unifyReturnBlockSet(Function &F, DomTreeUpdater &DTU,
                                       ArrayRef<BasicBlock *> ReturningBlocks,
                                       bool InsertExport,
                                       const TargetTransformInfo &TTI,
                                       StringRef Name) {
  // Otherwise, we need to insert a new basic block into the function, add a
  // PHI node (if the function returns values), and convert all of the return
  // instructions into unconditional branches.
  BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(), Name, &F);
  IRBuilder<> B(NewRetBlock);

  if (InsertExport) {
    // Ensure that there's only one "done" export in the shader by removing the
    // "done" bit set on the original final export. More than one "done" export
    // can lead to undefined behavior.
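    // (removeDoneExport clears the "done" flag, which is operand 6 of
    // llvm.amdgcn.exp and operand 4 of llvm.amdgcn.exp.compr; see above.)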
    removeDoneExport(F);

    Value *Undef = UndefValue::get(B.getFloatTy());
    B.CreateIntrinsic(Intrinsic::amdgcn_exp, {B.getFloatTy()},
                      {
                          B.getInt32(AMDGPU::Exp::ET_NULL),
                          B.getInt32(0),              // enabled channels
                          Undef, Undef, Undef, Undef, // values
                          B.getTrue(),                // done
                          B.getTrue(),                // valid mask
                      });
  }

  PHINode *PN = nullptr;
  if (F.getReturnType()->isVoidTy()) {
    B.CreateRetVoid();
  } else {
    // If the function doesn't return void... add a PHI node to the block...
    PN = B.CreatePHI(F.getReturnType(), ReturningBlocks.size(),
                     "UnifiedRetVal");
    assert(!InsertExport);
    B.CreateRet(PN);
  }

  // Loop over all of the blocks, replacing the return instruction with an
  // unconditional branch.
  std::vector<DominatorTree::UpdateType> Updates;
  Updates.reserve(ReturningBlocks.size());
  for (BasicBlock *BB : ReturningBlocks) {
    // Add an incoming element to the PHI node for every return instruction
    // that is merging into this new block...
    if (PN)
      PN->addIncoming(BB->getTerminator()->getOperand(0), BB);

    // Remove and delete the return inst.
    BB->getTerminator()->eraseFromParent();
    BranchInst::Create(NewRetBlock, BB);
    Updates.push_back({DominatorTree::Insert, BB, NewRetBlock});
  }

  if (RequireAndPreserveDomTree)
    DTU.applyUpdates(Updates);
  Updates.clear();

  for (BasicBlock *BB : ReturningBlocks) {
    // Cleanup possible branch to unconditional branch to the return.
    simplifyCFG(BB, TTI, RequireAndPreserveDomTree ?
                    &DTU : nullptr,
                SimplifyCFGOptions().bonusInstThreshold(2));
  }

  return NewRetBlock;
}

bool AMDGPUUnifyDivergentExitNodes::runOnFunction(Function &F) {
  DominatorTree *DT = nullptr;
  if (RequireAndPreserveDomTree)
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();

  // If there's only one exit, we don't need to do anything, unless this is a
  // pixel shader and that exit is an infinite loop, since we still have to
  // insert an export in that case.
  if (PDT.root_size() <= 1 && F.getCallingConv() != CallingConv::AMDGPU_PS)
    return false;

  LegacyDivergenceAnalysis &DA = getAnalysis<LegacyDivergenceAnalysis>();

  // Loop over all of the blocks in a function, tracking all of the blocks that
  // return.
  SmallVector<BasicBlock *, 4> ReturningBlocks;
  SmallVector<BasicBlock *, 4> UniformlyReachedRetBlocks;
  SmallVector<BasicBlock *, 4> UnreachableBlocks;

  // Dummy return block for infinite loop.
  BasicBlock *DummyReturnBB = nullptr;

  bool InsertExport = false;

  bool Changed = false;
  std::vector<DominatorTree::UpdateType> Updates;

  for (BasicBlock *BB : PDT.roots()) {
    if (isa<ReturnInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        ReturningBlocks.push_back(BB);
      else
        UniformlyReachedRetBlocks.push_back(BB);
    } else if (isa<UnreachableInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        UnreachableBlocks.push_back(BB);
    } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {

      ConstantInt *BoolTrue = ConstantInt::getTrue(F.getContext());
      if (DummyReturnBB == nullptr) {
        DummyReturnBB = BasicBlock::Create(F.getContext(),
                                           "DummyReturnBlock", &F);
        Type *RetTy = F.getReturnType();
        Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);

        // For pixel shaders, the producer guarantees that an export is
        // executed before each return instruction.
        // However, if there is an infinite loop and we insert a return
        // ourselves, we need to uphold that guarantee by inserting a null
        // export. This can happen e.g. in an infinite loop with kill
        // instructions, which is supposed to terminate. However, we don't
        // need to do this if there is a non-void return value, since then
        // there is an epilog afterwards which will still export.
        //
        // Note: In the case where only some threads enter the infinite loop,
        // this can result in the null export happening redundantly after the
        // original exports. However, the last "real" export happens after all
        // the threads that didn't enter an infinite loop converged, which
        // means that the only extra threads to execute the null export are
        // threads that entered the infinite loop, and they could only have
        // exited through being killed, which sets their exec bit to 0.
        // Therefore, unless there's an actual infinite loop, which can have
        // invalid results, or there's a kill after the last export, which we
        // assume the frontend won't do, this export will have the same exec
        // mask as the last "real" export, and therefore the valid mask will
        // be overwritten with the same value and will still be correct. Also,
        // even though this forces an extra unnecessary export wait, we assume
        // that this happens rarely enough in practice that we don't have to
        // worry about performance.
        if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
            RetTy->isVoidTy()) {
          InsertExport = true;
        }

        ReturnInst::Create(F.getContext(), RetVal, DummyReturnBB);
        ReturningBlocks.push_back(DummyReturnBB);
      }

      if (BI->isUnconditional()) {
        BasicBlock *LoopHeaderBB = BI->getSuccessor(0);
        BI->eraseFromParent(); // Delete the unconditional branch.
        // Add a new conditional branch with a dummy edge to the return block.
        BranchInst::Create(LoopHeaderBB, DummyReturnBB, BoolTrue, BB);
        Updates.push_back({DominatorTree::Insert, BB, DummyReturnBB});
      } else { // Conditional branch.
        SmallVector<BasicBlock *, 2> Successors(succ_begin(BB), succ_end(BB));

        // Create a new transition block to hold the conditional branch.
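        //
        // Roughly, in IR (names illustrative), this turns
        //   BB:
        //     br i1 %cc, label %S0, label %S1
        // into
        //   BB:
        //     br i1 true, label %TransitionBlock, label %DummyReturnBlock
        //   TransitionBlock:
        //     br i1 %cc, label %S0, label %S1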
        BasicBlock *TransitionBB = BB->splitBasicBlock(BI, "TransitionBlock");

        Updates.reserve(Updates.size() + 2 * Successors.size() + 2);

        // 'Successors' become successors of TransitionBB instead of BB,
        // and TransitionBB becomes a single successor of BB.
        Updates.push_back({DominatorTree::Insert, BB, TransitionBB});
        for (BasicBlock *Successor : Successors) {
          Updates.push_back({DominatorTree::Insert, TransitionBB, Successor});
          Updates.push_back({DominatorTree::Delete, BB, Successor});
        }

        // Create a branch that will always branch to the transition block
        // and that references DummyReturnBB.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(TransitionBB, DummyReturnBB, BoolTrue, BB);
        Updates.push_back({DominatorTree::Insert, BB, DummyReturnBB});
      }
      Changed = true;
    }
  }

  if (!UnreachableBlocks.empty()) {
    BasicBlock *UnreachableBlock = nullptr;

    if (UnreachableBlocks.size() == 1) {
      UnreachableBlock = UnreachableBlocks.front();
    } else {
      UnreachableBlock = BasicBlock::Create(F.getContext(),
                                            "UnifiedUnreachableBlock", &F);
      new UnreachableInst(F.getContext(), UnreachableBlock);

      Updates.reserve(Updates.size() + UnreachableBlocks.size());
      for (BasicBlock *BB : UnreachableBlocks) {
        // Remove and delete the unreachable inst.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(UnreachableBlock, BB);
        Updates.push_back({DominatorTree::Insert, BB, UnreachableBlock});
      }
      Changed = true;
    }

    if (!ReturningBlocks.empty()) {
      // Don't create a new unreachable inst if we have a return. The
      // structurizer/annotator can't handle the multiple exits.

      Type *RetTy = F.getReturnType();
      Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);
      // Remove and delete the unreachable inst.
      UnreachableBlock->getTerminator()->eraseFromParent();

      Function *UnreachableIntrin = Intrinsic::getDeclaration(
          F.getParent(), Intrinsic::amdgcn_unreachable);

      // Insert a call to an intrinsic tracking that this is an unreachable
      // point, in case we want to kill the active lanes or something later.
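      // The unified unreachable block will then end in:
      //   call void @llvm.amdgcn.unreachable()
      //   ret void   ; or "ret <ty> undef" for non-void functions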
      CallInst::Create(UnreachableIntrin, {}, "", UnreachableBlock);

      // Don't create a scalar trap. We would only want to trap if this code
      // was really reached, but a scalar trap would happen even if no lanes
      // actually reached here.
      ReturnInst::Create(F.getContext(), RetVal, UnreachableBlock);
      ReturningBlocks.push_back(UnreachableBlock);
      Changed = true;
    }
  }

  // FIXME: add PDT here once simplifycfg is ready.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  if (RequireAndPreserveDomTree)
    DTU.applyUpdates(Updates);
  Updates.clear();

  // Now handle return blocks.
  if (ReturningBlocks.empty())
    return Changed; // No blocks return

  if (ReturningBlocks.size() == 1 && !InsertExport)
    return Changed; // Already has a single return block

  const TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  // Unify returning blocks. If we are going to insert the export it is also
  // necessary to include blocks that are uniformly reached, because in
  // addition to inserting the export the "done" bits on existing exports will
  // be cleared and we do not want to end up with the normal export in a
  // non-unified, uniformly reached block with the "done" bit cleared.
  auto BlocksToUnify = std::move(ReturningBlocks);
  if (InsertExport) {
    llvm::append_range(BlocksToUnify, UniformlyReachedRetBlocks);
  }

  unifyReturnBlockSet(F, DTU, BlocksToUnify, InsertExport, TTI,
                      "UnifiedReturnBlock");
  return true;
}