//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
// by a return instruction with a branch to the entry of the function, creating
// a loop. This pass also implements the following extensions to the basic
// algorithm:
//
//  1. Trivial instructions between the call and return do not prevent the
//     transformation from taking place, though currently the analysis cannot
//     support moving any really useful instructions (only dead ones).
//  2. This pass transforms functions that are prevented from being tail
//     recursive by an associative and commutative expression to use an
//     accumulator variable, thus compiling the typical naive factorial or
//     'fib' implementation into efficient code (see the sketch at the end of
//     this header comment).
//  3. TRE is performed if the function returns void, if the return
//     returns the result returned by the call, or if the function returns a
//     run-time constant on all exits from the function. It is possible, though
//     unlikely, that the return returns something else (like constant 0), and
//     can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
//     the function return the exact same value.
//  4. If it can prove that callees do not access their caller stack frame,
//     they are marked as eligible for tail call elimination (by the code
//     generator).
//
// There are several improvements that could be made:
//
//  1. If the function has any alloca instructions, these instructions will be
//     moved out of the entry block of the function, causing them to be
//     evaluated each time through the tail recursion. Safely keeping allocas
//     in the entry block requires analysis to prove that the tail-called
//     function does not read or write the stack object.
//  2. Tail recursion is only performed if the call immediately precedes the
//     return instruction. It's possible that there could be a jump between
//     the call and the return.
//  3. There can be intervening operations between the call and the return that
//     prevent the TRE from occurring. For example, there could be GEP's and
//     stores to memory that will not be read or written by the call. This
//     requires some substantial analysis (such as with DSA) to prove safe to
//     move ahead of the call, but doing so could allow many more TREs to be
//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
//  4. The algorithm we use to detect if callees access their caller stack
//     frames is very primitive.
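//
// As a worked sketch of extension #2 (a hypothetical C-level example, not
// drawn from any test case), the accumulator transformation conceptually
// rewrites
//
//   int factorial(int n) { return n <= 1 ? 1 : n * factorial(n - 1); }
//
// into
//
//   int factorial(int n) {
//     int accumulator = 1;    // identity constant of the 'mul' operation
//     while (n > 1) {
//       accumulator *= n;     // the accumulating instruction
//       n = n - 1;            // the argument takes the recursive call's value
//     }
//     return accumulator;
//   }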
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/TailRecursionElimination.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

#define DEBUG_TYPE "tailcallelim"

STATISTIC(NumEliminated, "Number of tail calls removed");
STATISTIC(NumRetDuped, "Number of returns duplicated");
STATISTIC(NumAccumAdded, "Number of accumulators introduced");

/// Scan the specified function for alloca instructions.
/// If it contains any dynamic allocas, returns false.
static bool canTRE(Function &F) {
  // TODO: We don't do TRE if dynamic allocas are used.
  // Dynamic allocas allocate stack space which should be
  // deallocated before a new iteration starts. That is
  // currently not implemented.
  return llvm::all_of(instructions(F), [](Instruction &I) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    return !AI || AI->isStaticAlloca();
  });
}

namespace {
struct AllocaDerivedValueTracker {
  // Start at a root value and walk its use-def chain to mark calls that use
  // the value or a derived value in AllocaUsers, and the places where it may
  // escape in EscapePoints.
  void walk(Value *Root) {
    SmallVector<Use *, 32> Worklist;
    SmallPtrSet<Use *, 32> Visited;

    auto AddUsesToWorklist = [&](Value *V) {
      for (auto &U : V->uses()) {
        if (!Visited.insert(&U).second)
          continue;
        Worklist.push_back(&U);
      }
    };

    AddUsesToWorklist(Root);

    while (!Worklist.empty()) {
      Use *U = Worklist.pop_back_val();
      Instruction *I = cast<Instruction>(U->getUser());

      switch (I->getOpcode()) {
      case Instruction::Call:
      case Instruction::Invoke: {
        auto &CB = cast<CallBase>(*I);
        // If the alloca-derived argument is passed byval it is not an escape
        // point, or a use of an alloca. Calling with byval copies the contents
        // of the alloca into argument registers or stack slots, which exist
        // beyond the lifetime of the current frame.
        if (CB.isArgOperand(U) && CB.isByValArgument(CB.getArgOperandNo(U)))
          continue;
        bool IsNocapture =
            CB.isDataOperand(U) && CB.doesNotCapture(CB.getDataOperandNo(U));
        callUsesLocalStack(CB, IsNocapture);
        if (IsNocapture) {
          // If the alloca-derived argument is passed in as nocapture, then it
          // can't propagate to the call's return. That would be capturing.
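          // For example (hypothetical IR): given
          //   %r = call ptr @f(ptr nocapture %p)
          // %p may be alloca-derived, but %r cannot be, so the walk does not
          // need to continue into %r's uses.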
          continue;
        }
        break;
      }
      case Instruction::Load: {
        // The result of a load is not alloca-derived (unless an alloca has
        // otherwise escaped, but this is a local analysis).
        continue;
      }
      case Instruction::Store: {
        if (U->getOperandNo() == 0)
          EscapePoints.insert(I);
        continue; // Stores have no users to analyze.
      }
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::PHI:
      case Instruction::Select:
      case Instruction::AddrSpaceCast:
        break;
      default:
        EscapePoints.insert(I);
        break;
      }

      AddUsesToWorklist(I);
    }
  }

  void callUsesLocalStack(CallBase &CB, bool IsNocapture) {
    // Add it to the list of alloca users.
    AllocaUsers.insert(&CB);

    // If it's nocapture then it can't capture this alloca.
    if (IsNocapture)
      return;

    // If it can write to memory, it can leak the alloca value.
    if (!CB.onlyReadsMemory())
      EscapePoints.insert(&CB);
  }

  SmallPtrSet<Instruction *, 32> AllocaUsers;
  SmallPtrSet<Instruction *, 32> EscapePoints;
};
}

static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) {
  if (F.callsFunctionThatReturnsTwice())
    return false;

  // The local stack holds all alloca instructions and all byval arguments.
  AllocaDerivedValueTracker Tracker;
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr())
      Tracker.walk(&Arg);
  }
  for (auto &BB : F) {
    for (auto &I : BB)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
        Tracker.walk(AI);
  }

  bool Modified = false;

  // Track whether a block is reachable after an alloca has escaped. Blocks
  // that contain the escaping instruction will be marked as being visited
  // without an escaped alloca, since that is how the block began.
  enum VisitType {
    UNVISITED,
    UNESCAPED,
    ESCAPED
  };
  DenseMap<BasicBlock *, VisitType> Visited;

  // We propagate the fact that an alloca has escaped from block to successor.
  // Visit the blocks that are propagating the escapedness first. To do this,
  // we maintain two worklists.
  SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;

  // We may enter a block and visit it thinking that no alloca has escaped yet,
  // then see an escape point and go back around a loop edge and come back to
  // the same block twice. Because of this, we defer setting tail on calls when
  // we first encounter them in a block. Every entry in this list does not
  // statically use an alloca via use-def chain analysis, but may find an
  // alloca through other means if the block turns out to be reachable after an
  // escape point.
  SmallVector<CallInst *, 32> DeferredTails;

  BasicBlock *BB = &F.getEntryBlock();
  VisitType Escaped = UNESCAPED;
  do {
    for (auto &I : *BB) {
      if (Tracker.EscapePoints.count(&I))
        Escaped = ESCAPED;

      CallInst *CI = dyn_cast<CallInst>(&I);
      // A PseudoProbeInst has the IntrInaccessibleMemOnly tag, hence it is
      // considered to access memory and would be marked as a tail call if we
      // didn't bail out here.
      if (!CI || CI->isTailCall() || isa<DbgInfoIntrinsic>(&I) ||
          isa<PseudoProbeInst>(&I))
        continue;

      // Special-case operand bundles "clang.arc.attachedcall", "ptrauth", and
      // "kcfi".
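      // (For example, a call carrying a "deopt" operand bundle has bundles
      // other than the three listed, so it is treated as NoTail below.)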
      bool IsNoTail = CI->isNoTailCall() ||
                      CI->hasOperandBundlesOtherThan(
                          {LLVMContext::OB_clang_arc_attachedcall,
                           LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi});

      if (!IsNoTail && CI->doesNotAccessMemory()) {
        // A call to a readnone function whose arguments are all things
        // computed outside this function can be marked tail. Even if you
        // stored the alloca address into a global, a readnone function can't
        // load the global anyhow.
        //
        // Note that this runs whether we know an alloca has escaped or not. If
        // it has, then we can't trust Tracker.AllocaUsers to be accurate.
        bool SafeToTail = true;
        for (auto &Arg : CI->args()) {
          // Examine the argument value itself, not the Use's user (which is
          // always this call instruction).
          if (isa<Constant>(Arg))
            continue;
          if (Argument *A = dyn_cast<Argument>(Arg))
            if (!A->hasByValAttr())
              continue;
          SafeToTail = false;
          break;
        }
        if (SafeToTail) {
          using namespace ore;
          ORE->emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
                   << "marked as tail call candidate (readnone)";
          });
          CI->setTailCall();
          Modified = true;
          continue;
        }
      }

      if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI))
        DeferredTails.push_back(CI);
    }

    for (auto *SuccBB : successors(BB)) {
      auto &State = Visited[SuccBB];
      if (State < Escaped) {
        State = Escaped;
        if (State == ESCAPED)
          WorklistEscaped.push_back(SuccBB);
        else
          WorklistUnescaped.push_back(SuccBB);
      }
    }

    if (!WorklistEscaped.empty()) {
      BB = WorklistEscaped.pop_back_val();
      Escaped = ESCAPED;
    } else {
      BB = nullptr;
      while (!WorklistUnescaped.empty()) {
        auto *NextBB = WorklistUnescaped.pop_back_val();
        if (Visited[NextBB] == UNESCAPED) {
          BB = NextBB;
          Escaped = UNESCAPED;
          break;
        }
      }
    }
  } while (BB);

  for (CallInst *CI : DeferredTails) {
    if (Visited[CI->getParent()] != ESCAPED) {
      // If the escape point was part way through the block, calls after the
      // escape point wouldn't have been put into DeferredTails.
      LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
      CI->setTailCall();
      Modified = true;
    }
  }

  return Modified;
}

/// Return true if it is safe to move the specified
/// instruction from after the call to before the call, assuming that all
/// instructions between the call and this instruction are movable.
///
static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
  if (isa<DbgInfoIntrinsic>(I))
    return true;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_end &&
        llvm::findAllocaForValue(II->getArgOperand(1)))
      return true;

  // FIXME: We can move load/store/call/free instructions above the call if the
  // call does not mod/ref the memory location being processed.
  if (I->mayHaveSideEffects()) // This also handles volatile loads.
    return false;

  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    // Loads may always be moved above calls without side effects.
    if (CI->mayHaveSideEffects()) {
      // Non-volatile loads may be moved above a call with side effects if it
      // does not write to memory and the load provably won't trap.
      // Writes to memory only matter if they may alias the pointer
      // being loaded from.
      const DataLayout &DL = L->getModule()->getDataLayout();
      if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
          !isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
                                       L->getAlign(), DL, L))
        return false;
    }
  }

  // Otherwise, if this is a side-effect free instruction, check to make sure
  // that it does not use the return value of the call. If it doesn't use the
  // return value of the call, it must only use things that are defined before
  // the call, or movable instructions between the call and the instruction
  // itself.
  return !is_contained(I->operands(), CI);
}

static bool canTransformAccumulatorRecursion(Instruction *I, CallInst *CI) {
  if (!I->isAssociative() || !I->isCommutative())
    return false;

  assert(I->getNumOperands() == 2 &&
         "Associative/commutative operations should have 2 args!");

  // Exactly one operand should be the result of the call instruction.
  if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
      (I->getOperand(0) != CI && I->getOperand(1) != CI))
    return false;

  // The only user of this instruction we allow is a single return instruction.
  if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
    return false;

  return true;
}

static Instruction *firstNonDbg(BasicBlock::iterator I) {
  while (isa<DbgInfoIntrinsic>(I))
    ++I;
  return &*I;
}

namespace {
class TailRecursionEliminator {
  Function &F;
  const TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  OptimizationRemarkEmitter *ORE;
  DomTreeUpdater &DTU;

  // The below are shared state we want to have available when eliminating any
  // calls in the function. These values should be populated by
  // createTailRecurseLoopHeader the first time we find a call we can
  // eliminate.
  BasicBlock *HeaderBB = nullptr;
  SmallVector<PHINode *, 8> ArgumentPHIs;

  // PHI node to store our return value.
  PHINode *RetPN = nullptr;

  // i1 PHI node to track if we have a valid return value stored in RetPN.
  PHINode *RetKnownPN = nullptr;

  // Vector of select instructions we inserted. These selects use RetKnownPN
  // to either propagate RetPN or select a new return value.
  SmallVector<SelectInst *, 8> RetSelects;

  // The below are shared state needed when performing accumulator recursion.
  // These values should be populated by insertAccumulator the first time we
  // find an elimination that requires an accumulator.

  // PHI node to store our current accumulated value.
  PHINode *AccPN = nullptr;

  // The instruction doing the accumulating.
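  // (Illustrative: for a hypothetical 'return n * fact(n - 1)', this would
  // be the 'mul' instruction.)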
  Instruction *AccumulatorRecursionInstr = nullptr;

  TailRecursionEliminator(Function &F, const TargetTransformInfo *TTI,
                          AliasAnalysis *AA, OptimizationRemarkEmitter *ORE,
                          DomTreeUpdater &DTU)
      : F(F), TTI(TTI), AA(AA), ORE(ORE), DTU(DTU) {}

  CallInst *findTRECandidate(BasicBlock *BB);

  void createTailRecurseLoopHeader(CallInst *CI);

  void insertAccumulator(Instruction *AccRecInstr);

  bool eliminateCall(CallInst *CI);

  void cleanupAndFinalize();

  bool processBlock(BasicBlock &BB);

  void copyByValueOperandIntoLocalTemp(CallInst *CI, int OpndIdx);

  void copyLocalTempOfByValueOperandIntoArguments(CallInst *CI, int OpndIdx);

public:
  static bool eliminate(Function &F, const TargetTransformInfo *TTI,
                        AliasAnalysis *AA, OptimizationRemarkEmitter *ORE,
                        DomTreeUpdater &DTU);
};
} // namespace

CallInst *TailRecursionEliminator::findTRECandidate(BasicBlock *BB) {
  Instruction *TI = BB->getTerminator();

  if (&BB->front() == TI) // Make sure there is something before the terminator.
    return nullptr;

  // Scan backwards from the return, checking to see if there is a tail call in
  // this block. If so, set CI to it.
  CallInst *CI = nullptr;
  BasicBlock::iterator BBI(TI);
  while (true) {
    CI = dyn_cast<CallInst>(BBI);
    if (CI && CI->getCalledFunction() == &F)
      break;

    if (BBI == BB->begin())
      return nullptr; // Didn't find a potential tail call.
    --BBI;
  }

  assert((!CI->isTailCall() || !CI->isNoTailCall()) &&
         "Incompatible call site attributes(Tail,NoTail)");
  if (!CI->isTailCall())
    return nullptr;

  // As a special case, detect code like this:
  //   double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
  // and disable this xform in this case, because the code generator will
  // lower the call to fabs into inline code.
  if (BB == &F.getEntryBlock() &&
      firstNonDbg(BB->front().getIterator()) == CI &&
      firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
      !TTI->isLoweredToCall(CI->getCalledFunction())) {
    // A single-block function with just a call and a return. Check that
    // the arguments match.
    auto I = CI->arg_begin(), E = CI->arg_end();
    Function::arg_iterator FI = F.arg_begin(), FE = F.arg_end();
    for (; I != E && FI != FE; ++I, ++FI)
      if (*I != &*FI) break;
    if (I == E && FI == FE)
      return nullptr;
  }

  return CI;
}

void TailRecursionEliminator::createTailRecurseLoopHeader(CallInst *CI) {
  HeaderBB = &F.getEntryBlock();
  BasicBlock *NewEntry = BasicBlock::Create(F.getContext(), "", &F, HeaderBB);
  NewEntry->takeName(HeaderBB);
  HeaderBB->setName("tailrecurse");
  BranchInst *BI = BranchInst::Create(HeaderBB, NewEntry);
  BI->setDebugLoc(CI->getDebugLoc());

  // Move all fixed sized allocas from HeaderBB to NewEntry.
  for (BasicBlock::iterator OEBI = HeaderBB->begin(), E = HeaderBB->end(),
                            NEBI = NewEntry->begin();
       OEBI != E;)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
      if (isa<ConstantInt>(AI->getArraySize()))
        AI->moveBefore(&*NEBI);

  // Now that we have created a new block, which jumps to the entry
  // block, insert a PHI node for each argument of the function.
  // For now, we initialize each PHI to only have the real arguments
  // which are passed in.
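  // As an illustrative sketch (hypothetical IR; the names follow the code
  // below), a function 'define i32 @f(i32 %n)' now conceptually begins:
  //
  //   entry:                ; NewEntry, keeps the static allocas
  //     br label %tailrecurse
  //   tailrecurse:          ; HeaderBB
  //     %n.tr = phi i32 [ %n, %entry ], ...
  //
  // where each eliminated recursive call later adds an incoming value.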
  Instruction *InsertPos = &HeaderBB->front();
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    PHINode *PN =
        PHINode::Create(I->getType(), 2, I->getName() + ".tr", InsertPos);
    I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
    PN->addIncoming(&*I, NewEntry);
    ArgumentPHIs.push_back(PN);
  }

  // If the function doesn't return void, create the RetPN and RetKnownPN PHI
  // nodes to track our return value. We initialize RetPN with poison and
  // RetKnownPN with false since we can't know our return value at function
  // entry.
  Type *RetType = F.getReturnType();
  if (!RetType->isVoidTy()) {
    Type *BoolType = Type::getInt1Ty(F.getContext());
    RetPN = PHINode::Create(RetType, 2, "ret.tr", InsertPos);
    RetKnownPN = PHINode::Create(BoolType, 2, "ret.known.tr", InsertPos);

    RetPN->addIncoming(PoisonValue::get(RetType), NewEntry);
    RetKnownPN->addIncoming(ConstantInt::getFalse(BoolType), NewEntry);
  }

  // The entry block was changed from HeaderBB to NewEntry.
  // The forward DominatorTree needs to be recalculated when the EntryBB is
  // changed. In this corner-case we recalculate the entire tree.
  DTU.recalculate(*NewEntry->getParent());
}

void TailRecursionEliminator::insertAccumulator(Instruction *AccRecInstr) {
  assert(!AccPN && "Trying to insert multiple accumulators");

  AccumulatorRecursionInstr = AccRecInstr;

  // Start by inserting a new PHI node for the accumulator.
  pred_iterator PB = pred_begin(HeaderBB), PE = pred_end(HeaderBB);
  AccPN = PHINode::Create(F.getReturnType(), std::distance(PB, PE) + 1,
                          "accumulator.tr", &HeaderBB->front());

  // Loop over all of the predecessors of the tail recursion block. For the
  // real entry into the function we seed the PHI with the identity constant
  // for the accumulation operation. For any other existing branches to this
  // block (due to other tail recursions eliminated) the accumulator is not
  // modified. Because we haven't added the branch in the current block to
  // HeaderBB yet, it will not show up as a predecessor.
  for (pred_iterator PI = PB; PI != PE; ++PI) {
    BasicBlock *P = *PI;
    if (P == &F.getEntryBlock()) {
      Constant *Identity = ConstantExpr::getBinOpIdentity(
          AccRecInstr->getOpcode(), AccRecInstr->getType());
      AccPN->addIncoming(Identity, P);
    } else {
      AccPN->addIncoming(AccPN, P);
    }
  }

  ++NumAccumAdded;
}

// Creates a copy of the contents of the ByVal operand of the specified
// call instruction into a newly created temporary variable.
void TailRecursionEliminator::copyByValueOperandIntoLocalTemp(CallInst *CI,
                                                              int OpndIdx) {
  Type *AggTy = CI->getParamByValType(OpndIdx);
  assert(AggTy);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Get alignment of byVal operand.
  Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());

  // Create an alloca for the temporary byval operands.
  // Put the alloca into the entry block.
  Value *NewAlloca = new AllocaInst(
      AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
      CI->getArgOperand(OpndIdx)->getName(), &*F.getEntryBlock().begin());

  IRBuilder<> Builder(CI);
  Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));

  // Copy data from the byval operand into the temporary variable.
  Builder.CreateMemCpy(NewAlloca, /*DstAlign*/ Alignment,
                       CI->getArgOperand(OpndIdx),
                       /*SrcAlign*/ Alignment, Size);
  CI->setArgOperand(OpndIdx, NewAlloca);
}

// Creates a copy from the temporary variable (holding the value of the ByVal
// argument) into the corresponding function argument location.
void TailRecursionEliminator::copyLocalTempOfByValueOperandIntoArguments(
    CallInst *CI, int OpndIdx) {
  Type *AggTy = CI->getParamByValType(OpndIdx);
  assert(AggTy);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Get alignment of byVal operand.
  Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());

  IRBuilder<> Builder(CI);
  Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));

  // Copy data from the temporary variable into the corresponding
  // function argument location.
  Builder.CreateMemCpy(F.getArg(OpndIdx), /*DstAlign*/ Alignment,
                       CI->getArgOperand(OpndIdx),
                       /*SrcAlign*/ Alignment, Size);
}

bool TailRecursionEliminator::eliminateCall(CallInst *CI) {
  ReturnInst *Ret = cast<ReturnInst>(CI->getParent()->getTerminator());

  // Ok, we found a potential tail call. We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  Instruction *AccRecInstr = nullptr;
  BasicBlock::iterator BBI(CI);
  for (++BBI; &*BBI != Ret; ++BBI) {
    if (canMoveAboveCall(&*BBI, CI, AA))
      continue;

    // If we can't move the instruction above the call, it might be because it
    // is an associative and commutative operation that could be transformed
    // using accumulator recursion elimination. Check to see if this is the
    // case, and if so, remember which instruction accumulates for later.
    if (AccPN || !canTransformAccumulatorRecursion(&*BBI, CI))
      return false; // We cannot eliminate the tail recursion!

    // Yes, this is accumulator recursion. Remember which instruction
    // accumulates.
    AccRecInstr = &*BBI;
  }

  BasicBlock *BB = Ret->getParent();

  using namespace ore;
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
           << "transforming tail recursion into loop";
  });

  // OK! We can transform this tail call. If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (!HeaderBB)
    createTailRecurseLoopHeader(CI);

  // Copy values of ByVal operands into local temporary variables.
  for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
    if (CI->isByValArgument(I))
      copyByValueOperandIntoLocalTemp(CI, I);
  }

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
    if (CI->isByValArgument(I)) {
      copyLocalTempOfByValueOperandIntoArguments(CI, I);
      // When eliminating a tail call, we modify the values of the arguments.
      // Therefore, if the byval parameter has a readonly attribute, we have to
      // remove it. It is safe because, from the perspective of a caller, the
      // byval parameter is always treated as "readonly," even if the readonly
      // attribute is removed.
      F.removeParamAttr(I, Attribute::ReadOnly);
      ArgumentPHIs[I]->addIncoming(F.getArg(I), BB);
    } else
      ArgumentPHIs[I]->addIncoming(CI->getArgOperand(I), BB);
  }

  if (AccRecInstr) {
    insertAccumulator(AccRecInstr);

    // Rewrite the accumulator recursion instruction so that it does not use
    // the result of the call anymore, instead, use the PHI node we just
    // inserted.
    AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
  }

  // Update our return value tracking.
  if (RetPN) {
    if (Ret->getReturnValue() == CI || AccRecInstr) {
      // Defer selecting a return value.
      RetPN->addIncoming(RetPN, BB);
      RetKnownPN->addIncoming(RetKnownPN, BB);
    } else {
      // We found a return value we want to use, insert a select instruction to
      // select it if we don't already know what our return value will be and
      // store the result in our return value PHI node.
      SelectInst *SI = SelectInst::Create(
          RetKnownPN, RetPN, Ret->getReturnValue(), "current.ret.tr", Ret);
      RetSelects.push_back(SI);

      RetPN->addIncoming(SI, BB);
      RetKnownPN->addIncoming(ConstantInt::getTrue(RetKnownPN->getType()), BB);
    }

    if (AccPN)
      AccPN->addIncoming(AccRecInstr ? AccRecInstr : AccPN, BB);
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  BranchInst *NewBI = BranchInst::Create(HeaderBB, Ret);
  NewBI->setDebugLoc(CI->getDebugLoc());

  Ret->eraseFromParent(); // Remove return.
  CI->eraseFromParent();  // Remove call.
  DTU.applyUpdates({{DominatorTree::Insert, BB, HeaderBB}});
  ++NumEliminated;
  return true;
}

void TailRecursionEliminator::cleanupAndFinalize() {
  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves. Check to see if we did and clean up our mess if so. This
  // occurs when a function passes an argument straight through to its tail
  // call.
  for (PHINode *PN : ArgumentPHIs) {
    // If the PHI Node is a dynamic constant, replace it with the value it is.
    if (Value *PNV = simplifyInstruction(PN, F.getParent()->getDataLayout())) {
      PN->replaceAllUsesWith(PNV);
      PN->eraseFromParent();
    }
  }

  if (RetPN) {
    if (RetSelects.empty()) {
      // If we didn't insert any select instructions, then we know we didn't
      // store a return value and we can remove the PHI nodes we inserted.
      RetPN->dropAllReferences();
      RetPN->eraseFromParent();

      RetKnownPN->dropAllReferences();
      RetKnownPN->eraseFromParent();

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // return in the function, and return its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (BasicBlock &BB : F) {
          ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
          if (!RI)
            continue;

          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     RI->getOperand(0));
          AccRecInstrNew->insertBefore(RI);
          RI->setOperand(0, AccRecInstrNew);
        }
      }
    } else {
      // We need to insert a select instruction before any return left in the
      // function to select our stored return value if we have one.
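      // That is, each 'ret <ty> %v' becomes (sketch):
      //   %current.ret.tr = select i1 %ret.known.tr, <ty> %ret.tr, <ty> %v
      //   ret <ty> %current.ret.tr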
      for (BasicBlock &BB : F) {
        ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
        if (!RI)
          continue;

        SelectInst *SI = SelectInst::Create(
            RetKnownPN, RetPN, RI->getOperand(0), "current.ret.tr", RI);
        RetSelects.push_back(SI);
        RI->setOperand(0, SI);
      }

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // of the selects we inserted, and select its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (SelectInst *SI : RetSelects) {
          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     SI->getFalseValue());
          AccRecInstrNew->insertBefore(SI);
          SI->setFalseValue(AccRecInstrNew);
        }
      }
    }
  }
}

bool TailRecursionEliminator::processBlock(BasicBlock &BB) {
  Instruction *TI = BB.getTerminator();

  if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
    if (BI->isConditional())
      return false;

    BasicBlock *Succ = BI->getSuccessor(0);
    ReturnInst *Ret = dyn_cast<ReturnInst>(Succ->getFirstNonPHIOrDbg(true));

    if (!Ret)
      return false;

    CallInst *CI = findTRECandidate(&BB);

    if (!CI)
      return false;

    LLVM_DEBUG(dbgs() << "FOLDING: " << *Succ
                      << "INTO UNCOND BRANCH PRED: " << BB);
    FoldReturnIntoUncondBranch(Ret, Succ, &BB, &DTU);
    ++NumRetDuped;

    // If all predecessors of Succ have been eliminated by
    // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
    // because the ret instruction in there is still using a value which
    // eliminateCall will attempt to remove. This block can only contain
    // instructions that can't have uses, therefore it is safe to remove.
    if (pred_empty(Succ))
      DTU.deleteBB(Succ);

    eliminateCall(CI);
    return true;
  } else if (isa<ReturnInst>(TI)) {
    CallInst *CI = findTRECandidate(&BB);

    if (CI)
      return eliminateCall(CI);
  }

  return false;
}

bool TailRecursionEliminator::eliminate(Function &F,
                                        const TargetTransformInfo *TTI,
                                        AliasAnalysis *AA,
                                        OptimizationRemarkEmitter *ORE,
                                        DomTreeUpdater &DTU) {
  if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  bool MadeChange = false;
  MadeChange |= markTails(F, ORE);

  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg())
    return MadeChange;

  if (!canTRE(F))
    return MadeChange;

  // Change any tail recursive calls to loops.
  TailRecursionEliminator TRE(F, TTI, AA, ORE, DTU);

  for (BasicBlock &BB : F)
    MadeChange |= TRE.processBlock(BB);

  TRE.cleanupAndFinalize();

  return MadeChange;
}

namespace {
struct TailCallElim : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  TailCallElim() : FunctionPass(ID) {
    initializeTailCallElimPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<PostDominatorTreeWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
    auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
    // There is no noticeable performance difference here between Lazy and
    // Eager UpdateStrategy based on some test results. It is feasible to
    // switch the UpdateStrategy to Lazy if we find it profitable later.
    DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);

    return TailRecursionEliminator::eliminate(
        F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
        &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), DTU);
  }
};
}

char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(TailCallElim, "tailcallelim", "Tail Call Elimination",
                    false, false)

// Public interface to the TailCallElimination pass
FunctionPass *llvm::createTailCallEliminationPass() {
  return new TailCallElim();
}

PreservedAnalyses TailCallElimPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
  auto *PDT = AM.getCachedResult<PostDominatorTreeAnalysis>(F);
  // There is no noticeable performance difference here between Lazy and Eager
  // UpdateStrategy based on some test results. It is feasible to switch the
  // UpdateStrategy to Lazy if we find it profitable later.
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
  bool Changed = TailRecursionEliminator::eliminate(F, &TTI, &AA, &ORE, DTU);

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<PostDominatorTreeAnalysis>();
  return PA;
}