//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
    EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
                            cl::Hidden,
                            cl::desc("Convert noalias attributes to metadata "
                                     "during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::ZeroOrMore, cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations.
// This option is not suitable for use with frontends that emit comprehensive
// parameter alignment annotations.
static cl::opt<bool>
    PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
                                 cl::init(false), cl::Hidden,
                                 cl::desc("Convert align attributes to "
                                          "assumptions during inlining."));

static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {

/// A class for recording information about inlining a landing pad.
class LandingPadInliningInfo {
  /// Destination of the invoke's unwind.
  BasicBlock *OuterResumeDest;

  /// Destination for the callee's resume.
  BasicBlock *InnerResumeDest = nullptr;

  /// LandingPadInst associated with the invoke.
  LandingPadInst *CallerLPad = nullptr;

  /// PHI for EH values from landingpad insts.
  PHINode *InnerEHValuesPHI = nullptr;

  SmallVector<Value*, 8> UnwindDestPHIValues;

public:
  LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()) {
    // If there are PHI nodes in the unwind destination block, we need to keep
    // track of which values came into them from the invoke before removing
    // the edge from this block.
    BasicBlock *InvokeBB = II->getParent();
    BasicBlock::iterator I = OuterResumeDest->begin();
    for (; isa<PHINode>(I); ++I) {
      // Save the value to use for this edge.
      PHINode *PHI = cast<PHINode>(I);
      UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
    }

    CallerLPad = cast<LandingPadInst>(I);
  }

  /// The outer unwind destination is the target of
  /// unwind edges introduced for calls within the inlined function.
  BasicBlock *getOuterResumeDest() const {
    return OuterResumeDest;
  }

  BasicBlock *getInnerResumeDest();

  LandingPadInst *getLandingPadInst() const { return CallerLPad; }

  /// Forward the 'resume' instruction to the caller's landing pad block.
  /// When the landing pad block has only one predecessor, this is
  /// a simple branch. When there is more than one predecessor, we need to
  /// split the landing pad block after the landingpad instruction and jump
  /// to there.
  void forwardResume(ResumeInst *RI,
                     SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

  /// Add incoming-PHI values to the unwind destination block for the given
  /// basic block, using the values for the original invoke's source block.
  void addIncomingPHIValuesFor(BasicBlock *BB) const {
    addIncomingPHIValuesForInto(BB, OuterResumeDest);
  }

  void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
    BasicBlock::iterator I = dest->begin();
    for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
      PHINode *phi = cast<PHINode>(I);
      phi->addIncoming(UnwindDestPHIValues[i], src);
    }
  }
};

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
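  // E.g. (sketch of the split below, not literal IR from any test):
  //   lpad:
  //     %lp = landingpad ...          ; CallerLPad
  //     <rest of block>
  // becomes
  //   lpad:
  //     %lp = landingpad ...
  //     br label %lpad.body
  //   lpad.body:                      ; InnerResumeDest
  //     <rest of block>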
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
      OuterResumeDest->splitBasicBlock(SplitPoint,
                                       OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this.
        // We can check its catchpads' descendants, since they might include
        // a cleanuppad with an "unwinds to caller" cleanupret, which can be
        // trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad.
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued
    // its children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad.
    // Record this in the memo map, and check to see if the original EHPad
    // being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't
/// provide an answer. Since most funclets will have their unwind dest
/// immediately available as the unwind dest of a catchswitch or cleanupret,
/// this routine searches top-down from the given pad and then up. To avoid
/// worst-case quadratic run-time given that approach, it uses a memo map to
/// avoid re-processing funclet trees. The callers that rewrite the IR as they
/// go take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
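  // E.g. (sketch, hypothetical nesting):
  //   %outer = cleanuppad within none []
  //     %inner = cleanuppad within %outer []   ; EHPad, no info of its own
  // The loop below walks from %inner up to %outer and asks each ancestor
  // for its unwind dest; if %inner unwinds at all, it must agree with
  // whatever its ancestors do.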
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of
    // information for the descendant that we're coming from. So assert that
    // we don't find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've
      // found that it is a funclet that does have information about unwinding
      // to a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad.
    // If it has an entry in the MemoMap (mapping it to nullptr), it must be
    // one of the TempMemos added on this invocation of getUnwindDestToken;
    // if a previous invocation recorded nullptr, it would have had to prove
    // that the ancestors of UselessPad, which include LastUselessPad, had no
    // information, and that in turn would have required proving that the
    // descendants of LastUselessPad, which include EHPad, have no information
    // about LastUselessPad, which would imply that EHPad was mapped to
    // nullptr in the MemoMap on that invocation, which isn't the case if we
    // got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have
    // no unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the
/// PHI nodes in that block with the values specified in InvokeDestPHIValues.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (Instruction &I : llvm::make_early_inc_range(*BB)) {
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(&I);

    if (!CI || CI->doesNotThrow())
      continue;

    if (CI->isInlineAsm()) {
      InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
      if (!IA->canThrow()) {
        continue;
      }
    }

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes.
    // The caller's "segment" of the deoptimization continuation attached to
    // the newly inlined @llvm.experimental_deoptimize (resp.
    // @llvm.experimental.guard) call should contain the exception handling
    // logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the
  // inlined landing pad instructions.
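  // E.g. (sketch, hypothetical clause): if the outer landingpad is
  //   %lp = landingpad { i8*, i32 } catch i8* @_ZTIi
  // then "catch i8* @_ZTIi" (and the cleanup flag, if set) is appended to
  // every landingpad cloned from the callee, so the merged pads still cover
  // everything the outer handler could catch.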
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (PHINode &PHI : UnwindDest->phis()) {
    // Save the value to use for this edge.
    UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given
  // basic block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
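  // E.g. (sketch): a cloned "cleanupret from %cp unwind to caller" becomes
  // "cleanupret from %cp unwind label %UnwindDest", since "caller" in the
  // callee now means the block the original invoke unwound to.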
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}

namespace {
/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of
/// the metadata, putting the two versions in separate scope domains.
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};
} // namespace

ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}

void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}

void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}

void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // TODO: The null checks for the MDMap.lookup() results should no longer
      // be necessary.
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
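///
/// E.g. (sketch, hypothetical signature): after inlining
///   define void @callee(i8* noalias %p, i8* %q)
/// accesses derived only from %p get "!alias.scope !N" for a fresh scope !N,
/// while accesses provably not based on %p get "!noalias !N", preserving the
/// guarantee that the noalias attribute itself no longer expresses.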
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for
  // each noalias function argument. Accesses using pointers based on that
  // argument become part of that alias scope, accesses using pointers not
  // based on that argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true
    // regardless of the linkage of the callee because the aliasing "scope"
    // is not just a property of the callee, but also all control dependencies
    // in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);

          // We'll retain this knowledge without additional metadata.
          if (AAResults::onlyAccessesInaccessibleMem(MRB))
            continue;

          if (AAResults::onlyAccessesArgPointees(MRB))
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not
          // just the pointer arguments, because we might be passing pointers
          // as integers, etc.
          // However, if we know that the call only accesses pointer
          // arguments, then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of
      // the noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols)?
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
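      // E.g. (sketch, hypothetical names): if ObjSet is
      // {%noalias_arg, %other_arg}, the access may really go through
      // %other_arg, which can alias anything, so UsesAliasingPtr is true and
      // no !alias.scope is added below.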
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {

  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  return !llvm::isGuaranteedToTransferExecutionToSuccessor(
      Begin->getIterator(), End->getIterator(), InlinerAttributeWindow + 1);
}

static AttrBuilder IdentifyValidAttributes(CallBase &CB) {

  AttrBuilder AB(CB.getContext(), CB.getAttributes().getRetAttrs());
  if (!AB.hasAttributes())
    return AB;
  AttrBuilder Valid(CB.getContext());
  // Only allow these white listed attributes to be propagated back to the
  // callee. This is because other attributes may only be valid on the call
  // itself, i.e. attributes such as signext and zeroext.
  if (auto DerefBytes = AB.getDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (AB.contains(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (AB.contains(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  if (!UpdateReturnAttributes)
    return;

  AttrBuilder Valid = IdentifyValidAttributes(CB);
  if (!Valid.hasAttributes())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Check that the cloned RetVal exists and is a call, otherwise we cannot
    // add the attributes on the cloned RetVal. Simplification during inlining
    // could have transformed the cloned instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;
    // Backward propagation of attributes to the returned value may be
    // incorrect if it is control flow dependent.
    // Consider:
    //    @callee {
    //      %rv = call @foo()
    //      %rv2 = call @bar()
    //      if (%rv2 != null)
    //        return %rv2
    //      if (%rv == null)
    //        exit()
    //      return %rv
    //    }
    //    caller() {
    //      %val = call nonnull @callee()
    //    }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to both RetVal and RI are in the same basic block and
    // there are no throwing/exiting instructions between these instructions.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCall(RetVal, RI))
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the
    // already existing attribute value (i.e. attributes such as
    // dereferenceable, dereferenceable_or_null etc). See AttrBuilder::merge
    // for more details.
    AttributeList AL = NewRetVal->getAttributes();
    AttributeList NewAL = AL.addRetAttributes(Context, Valid);
    NewRetVal->setAttributes(NewAL);
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CB.getCaller());
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp =
          IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(cast<AssumeInst>(NewAsmp));
    }
  }
}

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallBase &CB,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CB.getCaller();
  const Function *Callee = CB.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    // Skip 'reference' call records.
    if (!I->first)
      continue;

    const Value *OrigCall = *I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
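    // (If the mapped value was folded to a constant rather than cloned as a
    // call, the dyn_cast below yields null and the record is skipped.)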
    auto *NewCall = dyn_cast<CallBase>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    if (NewCall->getCalledFunction() &&
        NewCall->getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = NewCall->getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(NewCall, CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(NewCall, I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
}

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
                                    Module *M, BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size =
      Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
                       /*SrcAlign*/ Align(1), Size);
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Type *ByValType, Value *Arg,
                                  Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  assert(cast<PointerType>(Arg->getType())
             ->isOpaqueOrPointeeTypeMatches(ByValType));
  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getParent()->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy
  // and temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than
    // the passed in pointer, then we either have to round up the input
    // pointer or give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we
    // can round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
                                   AC) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.
    // This is bad for code quality, but rarely happens and is required for
    // correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  Align Alignment(DL.getPrefTypeAlignment(ByValType));

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Alignment = max(Alignment, MaybeAlign(ByValAlignment));

  Value *NewAlloca =
      new AllocaInst(ByValType, DL.getAllocaAddrSpace(), nullptr, Alignment,
                     Arg->getName(), &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users())
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
      if (II->isLifetimeStartOrEnd())
        return true;
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
                               LLVMContext &Ctx,
                               DenseMap<const MDNode *, MDNode *> &IANodes) {
  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
                         OrigDL.getScope(), IA);
}

/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // every other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  // Check if we are not generating inline line tables and want to use
  // the call site location instead.
  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      // Loop metadata needs to be updated so that the start and end locs
      // reference inlined-at locations.
      auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
                                &IANodes](Metadata *MD) -> Metadata * {
        if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
          return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
        return MD;
      };
      updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);

      if (!NoInlineLineTables)
        if (DebugLoc DL = BI->getDebugLoc()) {
          DebugLoc IDL =
              inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
          BI->setDebugLoc(IDL);
          continue;
        }

      if (CalleeHasDebugInfo && !NoInlineLineTables)
        continue;

      // If the inlined instruction has no line number, or if inline info
      // is not being generated, make it look as if it originates from the call
      // location. This is important for ((__always_inline, __nodebug__))
      // functions which must use caller location for all instructions in their
      // function body.

      // Don't update static allocas, as they may get moved later.
      if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;

      BI->setDebugLoc(TheCallDL);
    }

    // Remove debug info intrinsics if we're not keeping inline info.
    if (NoInlineLineTables) {
      BasicBlock::iterator BI = FI->begin();
      while (BI != FI->end()) {
        if (isa<DbgInfoIntrinsic>(BI)) {
          BI = BI->eraseFromParent();
          continue;
        }
        ++BI;
      }
    }
  }
}

/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// the callee's entry block gets the same frequency as the callsite block, and
/// that the relative frequencies of all cloned blocks remain the same after
/// cloning.
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto Entry : VMap) {
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it. When that
      // happens, we want to use the maximum among the original blocks'
      // frequencies.
      uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
      ClonedBBs);
}

/// Update the branch metadata for cloned call instructions.
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
                              const ProfileCount &CalleeEntryCount,
                              const CallBase &TheCall, ProfileSummaryInfo *PSI,
                              BlockFrequencyInfo *CallerBFI) {
  if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
    return;
  auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
  int64_t CallCount =
      std::min(CallSiteCount.getValueOr(0), CalleeEntryCount.getCount());
  updateProfileCallee(Callee, -CallCount, &VMap);
}

void llvm::updateProfileCallee(
    Function *Callee, int64_t EntryDelta,
    const ValueMap<const Value *, WeakTrackingVH> *VMap) {
  auto CalleeCount = Callee->getEntryCount();
  if (!CalleeCount.hasValue())
    return;

  const uint64_t PriorEntryCount = CalleeCount->getCount();

  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count and has to be set to 0, so guard against underflow.
  const uint64_t NewEntryCount =
      (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
          ? 0
          : PriorEntryCount + EntryDelta;

  // A non-null VMap means we are updating the profile during inlining: scale
  // the profile weights of the cloned call sites by the portion of the entry
  // count that was transferred to the caller.
  if (VMap) {
    uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
    for (auto Entry : *VMap)
      if (isa<CallInst>(Entry.first))
        if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
          CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
  }

  if (EntryDelta) {
    Callee->setEntryCount(NewEntryCount);

    for (BasicBlock &BB : *Callee)
      // No need to update the callsite if it is pruned during inlining.
      if (!VMap || VMap->count(&BB))
        for (Instruction &I : BB)
          if (CallInst *CI = dyn_cast<CallInst>(&I))
            CI->updateProfWeight(NewEntryCount, PriorEntryCount);
  }
}

/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
/// result is implicitly consumed by a call to retainRV or claimRV immediately
/// after the call. This function inlines the retainRV/claimRV calls.
///
/// There are three cases to consider:
///
/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
///    object in the callee return block, the autoreleaseRV call and the
///    retainRV/claimRV call in the caller cancel out. If the call in the
///    caller is a claimRV call, a call to objc_release is emitted.
///
/// 2. If there is a call in the callee return block that doesn't have operand
///    bundle "clang.arc.attachedcall", the operand bundle on the original call
///    is transferred to the call in the callee.
///
/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller
///    is a retainRV call.
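///
/// As an illustrative sketch of case 1 (the IR value names here are invented,
/// not taken from any particular frontend): if the callee's return block ends
/// in
///
///   %r = call i8* @llvm.objc.autoreleaseReturnValue(i8* %obj)
///   ret i8* %r
///
/// and the call site carries a "clang.arc.attachedcall" operand bundle naming
/// retainRV, the autoreleaseRV/retainRV pair cancels out and both calls are
/// removed; had the bundle named claimRV instead, a call to objc_release
/// would be emitted in their place.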
static void
inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
                           const SmallVectorImpl<ReturnInst *> &Returns) {
  Module *Mod = CB.getModule();
  assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
  bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
       IsUnsafeClaimRV = !IsRetainRV;

  for (auto *RI : Returns) {
    Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
    bool InsertRetainCall = IsRetainRV;
    IRBuilder<> Builder(RI->getContext());

    // Walk backwards through the basic block looking for either a matching
    // autoreleaseRV call or an unannotated call.
    auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
                                      RI->getParent()->rend());
    for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
      // Ignore casts.
      if (isa<CastInst>(I))
        continue;

      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
            !II->hasNUses(0) ||
            objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
          break;

        // If we've found a matching autoreleaseRV call:
        // - If claimRV is attached to the call, insert a call to objc_release
        //   and erase the autoreleaseRV call.
        // - If retainRV is attached to the call, just erase the autoreleaseRV
        //   call.
        if (IsUnsafeClaimRV) {
          Builder.SetInsertPoint(II);
          Function *IFn =
              Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
          Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
          Builder.CreateCall(IFn, BC, "");
        }
        II->eraseFromParent();
        InsertRetainCall = false;
        break;
      }

      auto *CI = dyn_cast<CallInst>(&I);

      if (!CI)
        break;

      if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
          objcarc::hasAttachedCallOpBundle(CI))
        break;

      // If we've found an unannotated call that defines RetOpnd, add a
      // "clang.arc.attachedcall" operand bundle.
      Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
      OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
      auto *NewCall = CallBase::addOperandBundle(
          CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
      NewCall->copyMetadata(*CI);
      CI->replaceAllUsesWith(NewCall);
      CI->eraseFromParent();
      InsertRetainCall = false;
      break;
    }

    if (InsertRetainCall) {
      // The retainRV is attached to the call and we've failed to find a
      // matching autoreleaseRV or an annotated call in the callee. Emit a call
      // to objc_retain.
      Builder.SetInsertPoint(RI);
      Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
      Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
      Builder.CreateCall(IFn, BC, "");
    }
  }
}

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime,
                                        Function *ForwardVarArgsTo) {
  assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");

  // FIXME: we don't inline callbr yet.
  if (isa<CallBrInst>(CB))
    return InlineResult::failure("We don't inline callbr yet.");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  Function *CalledFunc = CB.getCalledFunction();
  if (!CalledFunc ||               // Can't inline external function or indirect
      CalledFunc->isDeclaration()) // call!
    return InlineResult::failure("external or indirect");

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  if (CB.hasOperandBundles()) {
    for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;
      if (Tag == LLVMContext::OB_clang_arc_attachedcall)
        continue;

      return InlineResult::failure("unsupported operand bundle");
    }
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CB.doesNotThrow();

  BasicBlock *OrigBB = CB.getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return InlineResult::failure("incompatible GC");
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function used
  // in the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return InlineResult::failure("incompatible personality");
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
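  // (A hedged illustration, with invented token/function names: a call site
  // nested in a cleanup funclet looks like
  //
  //   %pad = cleanuppad within none []
  //   call void @g() [ "funclet"(token %pad) ]
  //
  // and the cloned callee's top-level funclets and calls must later be
  // re-parented under that same pad.)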
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isScopedEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CB.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal. What about the target function?

      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad. Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return InlineResult::failure("catch in cleanup funclet");
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant; there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return InlineResult::failure("SEH in cleanup funclet");
          }
        }
      }
    }
  }

  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to the caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && isa<CallInst>(CB)) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    struct ByValInit {
      Value *Dst;
      Value *Src;
      Type *Ty;
    };
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<ByValInit, 4> ByValInits;

    // When inlining a function that contains noalias scope metadata,
    // this metadata needs to be cloned so that the inlined blocks
    // have different "unique scopes" at every call site.
    // Track the metadata that must be cloned. Do this before other changes to
    // the function, so that we do not get in trouble when inlining caller ==
    // callee.
    ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

    auto &DL = Caller->getParent()->getDataLayout();

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    auto AI = CB.arg_begin();
    unsigned ArgNo = 0;
    for (Function::arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.
      // However, we don't do this if the callee is readonly or readnone,
      // because the copy would be unneeded: the callee doesn't modify the
      // struct.
      if (CB.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
                                        &CB, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo));
        if (ActualArg != *AI)
          ByValInits.push_back(
              {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
      }

      VMap[&*I] = ActualArg;
    }

    // TODO: Remove this when users have been updated to the assume bundles.
    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CB, IFI);

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // Preserve all attributes on the call and its parameters.
    salvageKnowledge(&CB, AC);

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo);
    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Insert retainRV/claimRV runtime calls.
    objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
    if (RVCallKind != objcarc::ARCInstKind::None)
      inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);

    // Update caller/callee profiles only when requested. For sample loader
    // inlining, the context-sensitive inlinee profile doesn't need to be
    // subtracted from the callee profile, and the inlined clone also doesn't
    // need to be scaled based on the call site count.
    if (IFI.UpdateProfile) {
      if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
        // Update the BFI of blocks cloned into the caller.
        updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                        CalledFunc->front());

      if (auto Profile = CalledFunc->getEntryCount())
        updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
                          IFI.CallerBFI);
    }

    // Inject byval arguments initialization.
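    // (For instance, as an illustrative sketch with invented value names: a
    // byval argument that was given a fresh alloca %b above now gets its
    // explicit copy emitted at the top of the inlined entry block, e.g.
    //
    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %b,
    //                                        i8* align 1 %src, i64 24, i1 false)
    //
    // where 24 stands in for the store size of the byval type.)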
    for (ByValInit &Init : ByValInits)
      HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
                              &*FirstNewBlock, IFI);

    Optional<OperandBundleUse> ParentDeopt =
        CB.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
        if (!ICS)
          continue; // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        OpDefs.reserve(ICS->getNumOperandBundles());

        for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
             ++COBi) {
          auto ChildOB = ICS->getOperandBundleAt(COBi);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
          llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        ICS->replaceAllUsesWith(NewI);

        VH = nullptr;
        ICS->eraseFromParent();
      }
    }

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);

    // For 'nodebug' functions, the associated DISubprogram is always null.
    // Conservatively avoid propagating the callsite debug location to
    // instructions inlined from a function whose DISubprogram is not null.
    fixupLineNumbers(Caller, FirstNewBlock, &CB,
                     CalledFunc->getSubprogram() != nullptr);

    // Now clone the inlined noalias scope metadata.
    SAMetadataCloner.clone();
    SAMetadataCloner.remap(FirstNewBlock, Caller->end());

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);

    // Clone return attributes on the callsite into the calls within the
    // inlined function which feed into its return value.
    AddReturnAttributes(CB, VMap);

    // Propagate metadata on the callsite if necessary.
    PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock)
          if (auto *II = dyn_cast<AssumeInst>(&I))
            IFI.GetAssumptionCache(*Caller).registerAssumption(II);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
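  // (Illustrative note: a static "alloca i32" from the inlinee's entry block
  // ends up in the caller's entry block, so it still satisfies
  // isStaticAlloca() and remains promotable by later passes such as mem2reg.)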
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             !cast<AllocaInst>(I)->use_empty() &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
  }

  SmallVector<Value*,4> VarArgsToForward;
  SmallVector<AttributeSet, 4> VarArgsAttrs;
  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
       i < CB.arg_size(); i++) {
    VarArgsToForward.push_back(CB.getArgOperand(i));
    VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(&CB))
      CallSiteTailKind = CI->getTailCallKind();

    // For inlining purposes, the "notail" marker is the same as no marker.
    if (CallSiteTailKind == CallInst::TCK_NoTail)
      CallSiteTailKind = CallInst::TCK_None;

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : llvm::make_early_inc_range(*BB)) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // Forward varargs from inlined call site to calls to the
        // ForwardVarArgsTo function, if requested, and to musttail calls.
        if (!VarArgsToForward.empty() &&
            ((ForwardVarArgsTo &&
              CI->getCalledFunction() == ForwardVarArgsTo) ||
             CI->isMustTailCall())) {
          // Collect attributes for non-vararg parameters.
          AttributeList Attrs = CI->getAttributes();
          SmallVector<AttributeSet, 8> ArgAttrs;
          if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
            for (unsigned ArgNo = 0;
                 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
              ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
          }

          // Add VarArg attributes.
          ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
          Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
                                     Attrs.getRetAttrs(), ArgAttrs);
          // Add VarArgs to existing parameters.
          SmallVector<Value *, 6> Params(CI->args());
          Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
          CallInst *NewCI = CallInst::Create(
              CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
          NewCI->setDebugLoc(CI->getDebugLoc());
          NewCI->setAttributes(Attrs);
          NewCI->setCallingConv(CI->getCallingConv());
          CI->replaceAllUsesWith(NewCI);
          CI->eraseFromParent();
          CI = NewCI;
        }

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        //
        // Inlined notail calls should remain notail calls.
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        if (ChildTCK != CallInst::TCK_NoTail)
          ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  // We need to insert lifetime intrinsics even at O0 to avoid invalid
  // access caused by multithreaded coroutines. The check
  // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
  if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
      !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];
      // Don't mark swifterror allocas. They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that the array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by the type size.
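        // (Worked example, assuming a 64-bit target: "alloca i64, i64 2" has
        // type size 8 and array size 2; UINT64_MAX / 2 >= 8, so a constant
        // size of 16 bytes is emitted. With an array size of 2^61, the check
        // UINT64_MAX / 2^61 = 7 < 8 fails, and AllocaSize stays null.)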
        if (!AllocaTypeSize.isScalable() &&
            AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
            std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
                AllocaTypeSize.getFixedSize()) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or
        // deoptimize call and a return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or deoptimize
      // call and a return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls &&
          RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions. This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the callsite's
  // EHPad.

  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to any top-level call sites.
      SmallVector<OperandBundleDef, 1> OpBundles;
      for (Instruction &II : llvm::make_early_inc_range(*BB)) {
        CallBase *I = dyn_cast<CallBase>(&II);
        if (!I)
          continue;

        // Skip call sites which are nounwind intrinsics.
        auto *CalledFn =
            dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
        if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
          continue;

        // Skip call sites which already have a "funclet" bundle.
        if (I->getOperandBundle(LLVMContext::OB_funclet))
          continue;

        I->getOperandBundlesAsDefs(OpBundles);
        OpBundles.emplace_back("funclet", CallSiteEHPad);

        Instruction *NewInst = CallBase::Create(I, OpBundles, I);
        NewInst->takeName(I);
        I->replaceAllUsesWith(NewInst);
        I->eraseFromParent();

        OpBundles.clear();
      }

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // the caller and we inline it into a call site which doesn't unwind but
      // into an EH pad that does. Such an edge must be dynamically unreachable.
      // As such, we replace the cleanupret with unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Return set,
    // so that the control flow from those returns does not get merged into the
    // caller (but terminate it instead). If the caller's return type does not
    // match the callee's return type, we also need to change the return type
    // of the intrinsic.
    if (Caller->getReturnType() == CB.getType()) {
      llvm::erase_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
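        // (Illustrative note: @llvm.experimental.deoptimize is overloaded on
        // its return type, so the re-typed declaration requested above is a
        // differently mangled intrinsic, e.g.
        // @llvm.experimental.deoptimize.i64 for a caller returning i64.)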
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->args());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        auto DeoptAttributes = DeoptCall->getAttributes();
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        NewDeoptCall->setAttributes(DeoptAttributes);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // Now that all of the transforms on the inlined code have taken place but
  // before we splice the inlined code into the CFG and lose track of which
  // blocks were actually inlined, collect the call sites. We only do this if
  // call graph updates weren't requested, as those provide value handle based
  // tracking of inlined call sites instead. Calls to intrinsics are not
  // collected because they are not inlineable.
  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
    // Otherwise just collect the raw call sites that were inlined.
    for (BasicBlock &NewBB :
         make_range(FirstNewBlock->getIterator(), Caller->end()))
      for (Instruction &I : NewBB)
        if (auto *CB = dyn_cast<CallBase>(&I))
          if (!(CB->getCalledFunction() &&
                CB->getCalledFunction()->isIntrinsic()))
            IFI.InlinedCallSites.push_back(CB);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
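  // (For example, inlining a one-block callee whose body is just
  // "ret i32 %x" leaves no control flow to merge: the cloned instructions are
  // spliced in front of the call, and uses of the call are rewritten to %x
  // below.)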
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!CB.use_empty()) {
      ReturnInst *R = Returns[0];
      if (&CB == R->getReturnValue())
        CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    CB.eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return InlineResult::success();
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else { // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  if (IFI.CallerBFI) {
    // Copy the original BB's block frequency to AfterCallBB.
    IFI.CallerBFI->setBlockFreq(
        AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  Instruction *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
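  // (Sketch with invented block/value names: two cloned blocks ending in
  // "ret i32 %a" and "ret i32 %b" get branches to the ".exit" block, where
  //   %phi = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  // takes over all uses of the original call.)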
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!CB.use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      CB.replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!CB.use_empty()) {
      if (&CB == Returns[0]->getReturnValue())
        CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!CB.use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  CB.eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_empty(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return InlineResult::success();
}