//===- AMDGPUUnifyDivergentExitNodes.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a variant of the UnifyFunctionExitNodes pass. Rather than ensuring
// there is at most one ret and one unreachable instruction, it ensures there
// is at most one divergent exiting block.
//
// StructurizeCFG can't deal with multi-exit regions formed by branches to
// multiple return nodes. It is not desirable to structurize regions with
// uniform branches, and unifying those into the same return block as divergent
// branches would inhibit the use of scalar branching. StructurizeCFG still
// can't deal with the case where one branch goes to a return and another to an
// unreachable; replace the unreachable with a return in that case.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-unify-divergent-exit-nodes"

namespace {

class AMDGPUUnifyDivergentExitNodes : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AMDGPUUnifyDivergentExitNodes() :
    FunctionPass(ID) {
    initializeAMDGPUUnifyDivergentExitNodesPass(*PassRegistry::getPassRegistry());
  }

  // We can preserve non-critical-edgeness when we unify function exit nodes.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};

} // end anonymous namespace

char AMDGPUUnifyDivergentExitNodes::ID = 0;

char &llvm::AMDGPUUnifyDivergentExitNodesID = AMDGPUUnifyDivergentExitNodes::ID;

INITIALIZE_PASS_BEGIN(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                      "Unify divergent function exit nodes", false, false)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_END(AMDGPUUnifyDivergentExitNodes, DEBUG_TYPE,
                    "Unify divergent function exit nodes", false, false)

void AMDGPUUnifyDivergentExitNodes::getAnalysisUsage(AnalysisUsage &AU) const {
  // TODO: Preserve dominator tree.
  AU.addRequired<PostDominatorTreeWrapperPass>();

  AU.addRequired<LegacyDivergenceAnalysis>();

  // No divergent values are changed, only blocks and branch edges.
  AU.addPreserved<LegacyDivergenceAnalysis>();

  // We preserve the non-critical-edgeness property.
  AU.addPreservedID(BreakCriticalEdgesID);

  // This is a cluster of orthogonal Transforms.
  AU.addPreservedID(LowerSwitchID);
  FunctionPass::getAnalysisUsage(AU);

  AU.addRequired<TargetTransformInfoWrapperPass>();
}

/// \returns true if \p BB is reachable through only uniform branches.
/// XXX - Is there a more efficient way to find this?
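/// (A depth-first walk over the transitive predecessors of \p BB: if any
/// terminator encountered on the way back toward the entry is divergent,
/// \p BB is not uniformly reached.)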
static bool isUniformlyReached(const LegacyDivergenceAnalysis &DA,
                               BasicBlock &BB) {
  SmallVector<BasicBlock *, 8> Stack;
  SmallPtrSet<BasicBlock *, 8> Visited;

  for (BasicBlock *Pred : predecessors(&BB))
    Stack.push_back(Pred);

  while (!Stack.empty()) {
    BasicBlock *Top = Stack.pop_back_val();
    if (!DA.isUniform(Top->getTerminator()))
      return false;

    for (BasicBlock *Pred : predecessors(Top)) {
      if (Visited.insert(Pred).second)
        Stack.push_back(Pred);
    }
  }

  return true;
}

static void removeDoneExport(Function &F) {
  ConstantInt *BoolFalse = ConstantInt::getFalse(F.getContext());
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (IntrinsicInst *Intrin = llvm::dyn_cast<IntrinsicInst>(&I)) {
        if (Intrin->getIntrinsicID() == Intrinsic::amdgcn_exp) {
          Intrin->setArgOperand(6, BoolFalse); // done
        } else if (Intrin->getIntrinsicID() == Intrinsic::amdgcn_exp_compr) {
          Intrin->setArgOperand(4, BoolFalse); // done
        }
      }
    }
  }
}

static BasicBlock *unifyReturnBlockSet(Function &F,
                                       ArrayRef<BasicBlock *> ReturningBlocks,
                                       bool InsertExport,
                                       const TargetTransformInfo &TTI,
                                       StringRef Name) {
  // We need to insert a new basic block into the function, add a PHI node (if
  // the function returns a value), and convert all of the return instructions
  // into unconditional branches.
  BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(), Name, &F);
  IRBuilder<> B(NewRetBlock);

  if (InsertExport) {
    // Ensure that there's only one "done" export in the shader by removing the
    // "done" bit set on the original final export. More than one "done" export
    // can lead to undefined behavior.
    removeDoneExport(F);

    Value *Undef = UndefValue::get(B.getFloatTy());
    B.CreateIntrinsic(Intrinsic::amdgcn_exp, { B.getFloatTy() },
                      {
                        B.getInt32(9),              // target, SQ_EXP_NULL
                        B.getInt32(0),              // enabled channels
                        Undef, Undef, Undef, Undef, // values
                        B.getTrue(),                // done
                        B.getTrue(),                // valid mask
                      });
  }

  PHINode *PN = nullptr;
  if (F.getReturnType()->isVoidTy()) {
    B.CreateRetVoid();
  } else {
    // If the function doesn't return void, add a PHI node to the block...
    PN = B.CreatePHI(F.getReturnType(), ReturningBlocks.size(),
                     "UnifiedRetVal");
    assert(!InsertExport);
    B.CreateRet(PN);
  }

  // Loop over all of the blocks, replacing the return instruction with an
  // unconditional branch.
  for (BasicBlock *BB : ReturningBlocks) {
    // Add an incoming element to the PHI node for every return instruction
    // that is merging into this new block.
    if (PN)
      PN->addIncoming(BB->getTerminator()->getOperand(0), BB);

    // Remove and delete the return inst.
    BB->getTerminator()->eraseFromParent();
    BranchInst::Create(NewRetBlock, BB);
  }

  for (BasicBlock *BB : ReturningBlocks) {
    // Clean up a possible branch to an unconditional branch to the return.
    // {2} is a SimplifyCFGOptions with a bonus instruction threshold of 2.
    simplifyCFG(BB, TTI, {2});
  }

  return NewRetBlock;
}

bool AMDGPUUnifyDivergentExitNodes::runOnFunction(Function &F) {
  auto &PDT = getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();

  // If there's only one exit, we don't need to do anything, unless this is a
  // pixel shader and that exit is an infinite loop, since we still have to
  // insert an export in that case.
  if (PDT.root_size() <= 1 && F.getCallingConv() != CallingConv::AMDGPU_PS)
    return false;

  LegacyDivergenceAnalysis &DA = getAnalysis<LegacyDivergenceAnalysis>();

  // Loop over all of the blocks in the function, tracking all of the blocks
  // that return.
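  // Divergently reached returns are collected in ReturningBlocks; uniformly
  // reached returns are kept separately, since they only need to be unified
  // when a null export has to be inserted (see below). Divergently reached
  // unreachable blocks are collected as well.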
  SmallVector<BasicBlock *, 4> ReturningBlocks;
  SmallVector<BasicBlock *, 4> UniformlyReachedRetBlocks;
  SmallVector<BasicBlock *, 4> UnreachableBlocks;

  // Dummy return block for infinite loop.
  BasicBlock *DummyReturnBB = nullptr;

  bool InsertExport = false;

  bool Changed = false;
  for (BasicBlock *BB : PDT.roots()) {
    if (isa<ReturnInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        ReturningBlocks.push_back(BB);
      else
        UniformlyReachedRetBlocks.push_back(BB);
    } else if (isa<UnreachableInst>(BB->getTerminator())) {
      if (!isUniformlyReached(DA, *BB))
        UnreachableBlocks.push_back(BB);
    } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {

      ConstantInt *BoolTrue = ConstantInt::getTrue(F.getContext());
      if (DummyReturnBB == nullptr) {
        DummyReturnBB = BasicBlock::Create(F.getContext(),
                                           "DummyReturnBlock", &F);
        Type *RetTy = F.getReturnType();
        Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);

        // For pixel shaders, the producer guarantees that an export is
        // executed before each return instruction. However, if there is an
        // infinite loop and we insert a return ourselves, we need to uphold
        // that guarantee by inserting a null export. This can happen e.g. in
        // an infinite loop with kill instructions, which is supposed to
        // terminate. However, we don't need to do this if there is a non-void
        // return value, since then there is an epilog afterwards which will
        // still export.
        //
        // Note: In the case where only some threads enter the infinite loop,
        // this can result in the null export happening redundantly after the
        // original exports. However, the last "real" export happens after all
        // the threads that didn't enter an infinite loop converged, which
        // means that the only extra threads to execute the null export are
        // threads that entered the infinite loop, and they could only have
        // exited through being killed, which sets their exec bit to 0.
        // Therefore, unless there's an actual infinite loop, which can have
        // invalid results, or there's a kill after the last export, which we
        // assume the frontend won't do, this export will have the same exec
        // mask as the last "real" export, and therefore the valid mask will be
        // overwritten with the same value and will still be correct. Also,
        // even though this forces an extra unnecessary export wait, we assume
        // that this happens rarely enough in practice that we don't have to
        // worry about performance.
        if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
            RetTy->isVoidTy()) {
          InsertExport = true;
        }

        ReturnInst::Create(F.getContext(), RetVal, DummyReturnBB);
        ReturningBlocks.push_back(DummyReturnBB);
      }

      if (BI->isUnconditional()) {
        BasicBlock *LoopHeaderBB = BI->getSuccessor(0);
        BI->eraseFromParent(); // Delete the unconditional branch.
        // Add a new conditional branch with a dummy edge to the return block.
        BranchInst::Create(LoopHeaderBB, DummyReturnBB, BoolTrue, BB);
      } else { // Conditional branch.
        // Create a new transition block to hold the conditional branch.
        BasicBlock *TransitionBB = BB->splitBasicBlock(BI, "TransitionBlock");

        // Create a branch that will always branch to the transition block and
        // references DummyReturnBB.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(TransitionBB, DummyReturnBB, BoolTrue, BB);
      }
      Changed = true;
    }
  }

  if (!UnreachableBlocks.empty()) {
    BasicBlock *UnreachableBlock = nullptr;

    if (UnreachableBlocks.size() == 1) {
      UnreachableBlock = UnreachableBlocks.front();
    } else {
      UnreachableBlock = BasicBlock::Create(F.getContext(),
                                            "UnifiedUnreachableBlock", &F);
      new UnreachableInst(F.getContext(), UnreachableBlock);

      for (BasicBlock *BB : UnreachableBlocks) {
        // Remove and delete the unreachable inst.
        BB->getTerminator()->eraseFromParent();
        BranchInst::Create(UnreachableBlock, BB);
      }
      Changed = true;
    }

    if (!ReturningBlocks.empty()) {
      // Don't create a new unreachable inst if we have a return. The
      // structurizer/annotator can't handle the multiple exits.

      Type *RetTy = F.getReturnType();
      Value *RetVal = RetTy->isVoidTy() ? nullptr : UndefValue::get(RetTy);
      // Remove and delete the unreachable inst.
      UnreachableBlock->getTerminator()->eraseFromParent();

      Function *UnreachableIntrin =
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::amdgcn_unreachable);

      // Insert a call to an intrinsic tracking that this is an unreachable
      // point, in case we want to kill the active lanes or something later.
      CallInst::Create(UnreachableIntrin, {}, "", UnreachableBlock);

      // Don't create a scalar trap. We would only want to trap if this code
      // was really reached, but a scalar trap would happen even if no lanes
      // actually reached here.
      ReturnInst::Create(F.getContext(), RetVal, UnreachableBlock);
      ReturningBlocks.push_back(UnreachableBlock);
      Changed = true;
    }
  }

  // Now handle return blocks.
  if (ReturningBlocks.empty())
    return Changed; // No blocks return.

  if (ReturningBlocks.size() == 1 && !InsertExport)
    return Changed; // Already has a single return block.

  const TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  // Unify returning blocks. If we are going to insert the export it is also
  // necessary to include blocks that are uniformly reached, because in
  // addition to inserting the export the "done" bits on existing exports will
  // be cleared, and we do not want to end up with the normal export in a
  // non-unified, uniformly reached block with the "done" bit cleared.
  auto BlocksToUnify = std::move(ReturningBlocks);
  if (InsertExport) {
    BlocksToUnify.insert(BlocksToUnify.end(), UniformlyReachedRetBlocks.begin(),
                         UniformlyReachedRetBlocks.end());
  }

  unifyReturnBlockSet(F, BlocksToUnify, InsertExport, TTI,
                      "UnifiedReturnBlock");
  return true;
}