//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "inline-function"

using namespace llvm;
using namespace llvm::memprof;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
    EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
                            cl::Hidden,
                            cl::desc("Convert noalias attributes to metadata "
                                     "during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool>
    PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
                                 cl::init(false), cl::Hidden,
                                 cl::desc("Convert align attributes to "
                                          "assumptions during inlining."));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {

  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
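/// For illustration (hypothetical caller IR), the split performed below turns
///   lpad:
///     %lp = landingpad ...              ; CallerLPad
///     <handler code>
/// into
///   lpad:
///     %lp = landingpad ...
///     br label %lpad.body
///   lpad.body:                          ; InnerResumeDest
///     %eh.lpad-body = phi ...           ; InnerEHValuesPHI
///     <handler code>
/// so that forwarded 'resume' instructions have a single block to branch to.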
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
      OuterResumeDest->splitBasicBlock(SplitPoint,
                                       OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad.
    // Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
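/// For illustration (hypothetical EH IR), given
///   %outer = cleanuppad within none []
///   ...
///   %cs = catchswitch within %outer [label %handler] unwind to caller
/// a query on %cs first searches its catchpads' descendants; if they offer no
/// proof either way, the search proceeds upward to %outer, whose cleanupret
/// (if any) determines the answer.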
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that unwind to UnwindEdge, splitting the
/// block at the call; PHI nodes in the unwind destination are updated by
/// the caller.
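///
/// For example (hypothetical IR), a throwing call in the inlined body
///   %r = call i32 @may_throw()
/// is rewritten to
///   %r = invoke i32 @may_throw()
///           to label %split unwind label %unwind.edge
/// with BB split after the call and the remainder moved to %split.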
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (Instruction &I : llvm::make_early_inc_range(*BB)) {
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(&I);

    if (!CI || CI->doesNotThrow())
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental_deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
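///
/// For example (hypothetical IR), a "cleanupret from %cp unwind to caller" in
/// the inlinee is rewritten below to "cleanupret from %cp unwind label %dest",
/// where %dest is the inlined invoke's unwind destination; catchswitches that
/// unwind to caller are similarly redirected when that is legal.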
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (PHINode &PHI : UnwindDest->phis()) {
    // Save the value to use for this edge.
    UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given
  // basic block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

static bool haveCommonPrefix(MDNode *MIBStackContext,
                             MDNode *CallsiteStackContext) {
  assert(MIBStackContext->getNumOperands() > 0 &&
         CallsiteStackContext->getNumOperands() > 0);
  // Because of the context trimming performed during matching, the callsite
  // context could have more stack ids than the MIB. We match up to the end of
  // the shortest stack context.
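  // For example (hypothetical stack ids), an MIB context {1, 2, 3} and a
  // callsite context {1, 2} share the prefix {1, 2} and therefore match.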
  for (auto MIBStackIter = MIBStackContext->op_begin(),
            CallsiteStackIter = CallsiteStackContext->op_begin();
       MIBStackIter != MIBStackContext->op_end() &&
       CallsiteStackIter != CallsiteStackContext->op_end();
       MIBStackIter++, CallsiteStackIter++) {
    auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
    auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
    assert(Val1 && Val2);
    if (Val1->getZExtValue() != Val2->getZExtValue())
      return false;
  }
  return true;
}

static void removeMemProfMetadata(CallBase *Call) {
  Call->setMetadata(LLVMContext::MD_memprof, nullptr);
}

static void removeCallsiteMetadata(CallBase *Call) {
  Call->setMetadata(LLVMContext::MD_callsite, nullptr);
}

static void updateMemprofMetadata(CallBase *CI,
                                  const std::vector<Metadata *> &MIBList) {
  assert(!MIBList.empty());
  // Remove existing memprof, which will either be replaced or may not be
  // needed if we are able to use a single allocation type function attribute.
  removeMemProfMetadata(CI);
  CallStackTrie CallStack;
  for (Metadata *MIB : MIBList)
    CallStack.addCallStack(cast<MDNode>(MIB));
  bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);
  assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));
  if (!MemprofMDAttached)
    // If we used a function attribute remove the callsite metadata as well.
    removeCallsiteMetadata(CI);
}

// Update the metadata on the inlined copy ClonedCall of a call OrigCall in the
// inlined callee body, based on the callsite metadata InlinedCallsiteMD from
// the call that was inlined.
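//
// For example (hypothetical ids), if the cloned call carried !callsite {5}
// and the call that was inlined carried !callsite {2, 3}, the clone's new
// context becomes the concatenation {5, 2, 3}.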
static void propagateMemProfHelper(const CallBase *OrigCall,
                                   CallBase *ClonedCall,
                                   MDNode *InlinedCallsiteMD) {
  MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);
  MDNode *ClonedCallsiteMD = nullptr;
  // Check if the call originally had callsite metadata, and update it for the
  // new call in the inlined body.
  if (OrigCallsiteMD) {
    // The cloned call's context is now the concatenation of the original
    // call's callsite metadata and the callsite metadata on the call where it
    // was inlined.
    ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);
    ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
  }

  // Update any memprof metadata on the cloned call.
  MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);
  if (!OrigMemProfMD)
    return;
  // We currently expect that allocations with memprof metadata also have
  // callsite metadata for the allocation's part of the context.
  assert(OrigCallsiteMD);

  // New call's MIB list.
  std::vector<Metadata *> NewMIBList;

  // For each MIB metadata, check if its call stack context starts with the
  // new clone's callsite metadata. If so, that MIB goes onto the cloned call
  // in the inlined body. If not, it stays on the out-of-line original call.
  for (auto &MIBOp : OrigMemProfMD->operands()) {
    MDNode *MIB = dyn_cast<MDNode>(MIBOp);
    // Stack is first operand of MIB.
    MDNode *StackMD = getMIBStackNode(MIB);
    assert(StackMD);
    // See if the new cloned callsite context matches this profiled context.
    if (haveCommonPrefix(StackMD, ClonedCallsiteMD))
      // Add it to the cloned call's MIB list.
      NewMIBList.push_back(MIB);
  }
  if (NewMIBList.empty()) {
    removeMemProfMetadata(ClonedCall);
    removeCallsiteMetadata(ClonedCall);
    return;
  }
  if (NewMIBList.size() < OrigMemProfMD->getNumOperands())
    updateMemprofMetadata(ClonedCall, NewMIBList);
}

// Update memprof related metadata (!memprof and !callsite) based on the
// inlining of Callee into the callsite at CB. The updates include merging the
// inlined callee's callsite metadata with that of the inlined call,
// and moving the subset of any memprof contexts to the inlined callee
// allocations if they match the new inlined call stack.
static void
propagateMemProfMetadata(Function *Callee, CallBase &CB,
                         bool ContainsMemProfMetadata,
                         const ValueMap<const Value *, WeakTrackingVH> &VMap) {
  MDNode *CallsiteMD = CB.getMetadata(LLVMContext::MD_callsite);
  // Only need to update if the inlined callsite had callsite metadata, or if
  // there was any memprof metadata inlined.
  if (!CallsiteMD && !ContainsMemProfMetadata)
    return;

  // Propagate metadata onto the cloned calls in the inlined callee.
  for (const auto &Entry : VMap) {
    // See if this is a call that has been inlined and remapped, and not
    // simplified away in the process.
    auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
    auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
    if (!OrigCall || !ClonedCall)
      continue;
    // If the inlined callsite did not have any callsite metadata, then it
    // isn't involved in any profiled call contexts, and we can remove any
    // memprof metadata on the cloned call.
    if (!CallsiteMD) {
      removeMemProfMetadata(ClonedCall);
      removeCallsiteMetadata(ClonedCall);
      continue;
    }
    propagateMemProfHelper(OrigCall, ClonedCall, CallsiteMD);
  }
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
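///
/// For example (hypothetical IR), if the call site carries "!noalias !5",
/// each memory-accessing instruction cloned from the callee gets !5
/// concatenated into its own !noalias list.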
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}

/// Bundle operands of the inlined function must be added to inlined call
/// sites.
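///
/// For example (hypothetical IR), when inlining through a call site carrying
/// [ "funclet"(token %pad) ], a cloned "call void @g()" becomes
/// "call void @g() [ "funclet"(token %pad) ]" so that EH table generation can
/// attribute it to the right funclet.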
static void PropagateOperandBundles(Function::iterator InlinedBB,
                                    Instruction *CallSiteEHPad) {
  for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
    CallBase *I = dyn_cast<CallBase>(&II);
    if (!I)
      continue;
    // Skip call sites which already have a "funclet" bundle.
    if (I->getOperandBundle(LLVMContext::OB_funclet))
      continue;
    // Skip call sites which are nounwind intrinsics (as long as they don't
    // lower into regular function calls in the course of IR transformations).
    auto *CalledFn =
        dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
    if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
        !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
      continue;

    SmallVector<OperandBundleDef, 1> OpBundles;
    I->getOperandBundlesAsDefs(OpBundles);
    OpBundles.emplace_back("funclet", CallSiteEHPad);

    Instruction *NewInst = CallBase::Create(I, OpBundles, I);
    NewInst->takeName(I);
    I->replaceAllUsesWith(NewInst);
    I->eraseFromParent();
  }
}

namespace {
/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of
/// the metadata, putting the two versions in separate scope domains.
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};
} // namespace

ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}

void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}

void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), std::nullopt));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}

void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // TODO: The null checks for the MDMap.lookup() results should no longer
      // be necessary.
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
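///
/// For example (hypothetical IR), for "define void @f(ptr noalias %p)",
/// accesses through pointers based on %p are tagged "!alias.scope !{!scope}"
/// while accesses provably not based on %p are tagged "!noalias !{!scope}",
/// with !scope created in a fresh domain for each inlining.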
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
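      // For example (hypothetical IR), for argument %p this emits:
      //   call void @llvm.experimental.noalias.scope.decl(metadata !{!scope})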
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          MemoryEffects ME = CalleeAAR->getMemoryEffects(Call);

          // We'll retain this knowledge without additional metadata.
          if (ME.onlyAccessesInaccessibleMem())
            continue;

          if (ME.onlyAccessesArgPointees())
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // Only care about pointer arguments. If a noalias argument is
          // accessed through a non-pointer argument, it must be captured
          // first (e.g. via ptrtoint), and we protect against captures below.
          if (!Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
           UsesUnknownObject = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        if (isEscapeSource(V)) {
          // An escape source can only alias with a noalias argument if it has
          // been captured beforehand.
          RequiresNoCaptureBefore = true;
        } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
          // If this is neither an escape source, nor some identified object
          // (which cannot directly alias a noalias argument), nor some other
          // argument (which, by definition, also cannot alias a noalias
          // argument), conservatively do not make any assumptions.
          UsesUnknownObject = true;
        }
      }

      // Nothing we can do if the used underlying object cannot be reliably
      // determined.
      if (UsesUnknownObject)
        continue;

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        RequiresNoCaptureBefore = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (ObjSet.contains(A))
          continue; // May be based on a noalias argument.

        // It might be tempting to skip the PointerMayBeCapturedBefore check if
        // A->hasNoCaptureAttr() is true, but this is incorrect because
        // nocapture only guarantees that no copies outlive the function, not
        // that the value cannot be locally captured.
        if (!RequiresNoCaptureBefore ||
            !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
                                        /* StoreCaptures */ false, I, &DT))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {

  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  return !llvm::isGuaranteedToTransferExecutionToSuccessor(
      Begin->getIterator(), End->getIterator(), InlinerAttributeWindow + 1);
}

static AttrBuilder IdentifyValidAttributes(CallBase &CB) {

  AttrBuilder AB(CB.getContext(), CB.getAttributes().getRetAttrs());
  if (!AB.hasAttributes())
    return AB;
  AttrBuilder Valid(CB.getContext());
  // Only allow these whitelisted attributes to be propagated back to the
  // callee. This is because other attributes may only be valid on the call
  // itself, i.e. attributes such as signext and zeroext.
  if (auto DerefBytes = AB.getDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (AB.contains(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (AB.contains(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  AttrBuilder Valid = IdentifyValidAttributes(CB);
  if (!Valid.hasAttributes())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Check that the cloned RetVal exists and is a call, otherwise we cannot
    // add the attributes on the cloned RetVal. Simplification during inlining
    // could have transformed the cloned instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;
    // Backward propagation of attributes to the returned value may be
    // incorrect if it is control-flow dependent.
    // Consider:
    //   @callee {
    //     %rv = call @foo()
    //     %rv2 = call @bar()
    //     if (%rv2 != null)
    //       return %rv2
    //     if (%rv == null)
    //       exit()
    //     return %rv
    //   }
    //   caller() {
    //     %val = call nonnull @callee()
    //   }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to cases where both RetVal and RI are in the same basic
    // block and there are no throwing/exiting instructions between them.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCall(RetVal, RI))
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the
    // already existing attribute value (i.e. attributes such as
    // dereferenceable, dereferenceable_or_null etc). See AttrBuilder::merge
    // for more details.
    AttributeList AL = NewRetVal->getAttributes();
    AttributeList NewAL = AL.addRetAttributes(Context, Valid);
    NewRetVal->setAttributes(NewAL);
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
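  // Illustrative sketch (hypothetical IR): for a pointer parameter declared
  // `align 32`, the assumption emitted below is roughly
  //   call void @llvm.assume(i1 true) [ "align"(ptr %arg, i64 32) ]
  // which later queries such as getKnownAlignment can pick up in the caller.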
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
        Arg.hasNUses(0))
      continue;
    MaybeAlign Alignment = Arg.getParamAlign();
    if (!Alignment)
      continue;

    if (!DTCalculated) {
      DT.recalculate(*CB.getCaller());
      DTCalculated = true;
    }
    // If we can already prove the asserted alignment in the context of the
    // caller, then don't bother inserting the assumption.
    Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
    if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= *Alignment)
      continue;

    CallInst *NewAsmp = IRBuilder<>(&CB).CreateAlignmentAssumption(
        DL, ArgVal, Alignment->value());
    AC->registerAssumption(cast<AssumeInst>(NewAsmp));
  }
}

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
                                    Module *M, BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI,
                                    Function *CalledFunc) {
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size =
      Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  CallInst *CI = Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
                                      /*SrcAlign*/ Align(1), Size);

  // The verifier requires that all calls of debug-info-bearing functions
  // from debug-info-bearing functions have a debug location (for inlining
  // purposes). Assign a dummy location to satisfy the constraint.
  if (!CI->getDebugLoc() && InsertBlock->getParent()->getSubprogram())
    if (DISubprogram *SP = CalledFunc->getSubprogram())
      CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Type *ByValType, Value *Arg,
                                  Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  MaybeAlign ByValAlignment) {
  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getParent()->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy
  // and temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than
    // that of the passed-in pointer, then we either have to round up the
    // input pointer or give up on this transformation.
    if (ByValAlignment.valueOrOne() == 1)
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we
    // can round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, *ByValAlignment, DL, TheCall, AC) >=
        *ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is
    // bad for code quality, but rarely happens and is required for
    // correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
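  // Illustrative sketch (hypothetical IR) of the overall byval lowering: a
  // callsite argument `ptr byval(%struct.S) align 8 %src` becomes
  //   %tmp = alloca %struct.S, align 8
  // here, with uses of the formal argument remapped to %tmp, and
  // HandleByValArgumentInit later emits
  //   call void @llvm.memcpy.p0.p0.i64(ptr align 1 %tmp, ptr align 1 %src,
  //                                    i64 <size>, i1 false)
  // at the top of the first inlined block.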
  Align Alignment = DL.getPrefTypeAlign(ByValType);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  if (ByValAlignment)
    Alignment = std::max(Alignment, *ByValAlignment);

  Value *NewAlloca =
      new AllocaInst(ByValType, DL.getAllocaAddrSpace(), nullptr, Alignment,
                     Arg->getName(), &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users())
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
      if (II->isLifetimeStartOrEnd())
        return true;
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy =
      Type::getInt8PtrTy(Ty->getContext(), Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy)
      continue;
    if (U->stripPointerCasts() != AI)
      continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
                               LLVMContext &Ctx,
                               DenseMap<const MDNode *, MDNode *> &IANodes) {
  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
                         OrigDL.getScope(), IA);
}

/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from
  // the same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // each other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  // Check if we are not generating inline line tables and want to use
  // the call site location instead.
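  // Illustrative sketch (hypothetical metadata): with inline line tables, a
  // callee location
  //   !10 = !DILocation(line: 5, scope: !callee)
  // is rewritten to carry the inlined-at chain
  //   !11 = !DILocation(line: 5, scope: !callee, inlinedAt: !12)
  //   !12 = distinct !DILocation(line: 42, scope: !caller)
  // where !12 is the distinct InlinedAtNode created above; with
  // "no-inline-line-tables", instructions instead receive the call site
  // location (line 42) directly.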
  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE;
         ++BI) {
      // Loop metadata needs to be updated so that the start and end locs
      // reference inlined-at locations.
      auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
                                &IANodes](Metadata *MD) -> Metadata * {
        if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
          return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
        return MD;
      };
      updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);

      if (!NoInlineLineTables)
        if (DebugLoc DL = BI->getDebugLoc()) {
          DebugLoc IDL =
              inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
          BI->setDebugLoc(IDL);
          continue;
        }

      if (CalleeHasDebugInfo && !NoInlineLineTables)
        continue;

      // If the inlined instruction has no line number, or if inline info
      // is not being generated, make it look as if it originates from the
      // call location. This is important for
      // __attribute__((always_inline, nodebug)) functions which must use
      // the caller location for all instructions in their function body.

      // Don't update static allocas, as they may get moved later.
      if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;

      // Do not force a debug loc for pseudo probes, since they do not need
      // to be debuggable, and also they are expected to have a zero/null
      // dwarf discriminator at this point which could be violated otherwise.
      if (isa<PseudoProbeInst>(BI))
        continue;

      BI->setDebugLoc(TheCallDL);
    }

    // Remove debug info intrinsics if we're not keeping inline info.
    if (NoInlineLineTables) {
      BasicBlock::iterator BI = FI->begin();
      while (BI != FI->end()) {
        if (isa<DbgInfoIntrinsic>(BI)) {
          BI = BI->eraseFromParent();
          continue;
        }
        ++BI;
      }
    }
  }
}

#undef DEBUG_TYPE
#define DEBUG_TYPE "assignment-tracking"
/// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB.
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL,
                                                 const CallBase &CB) {
  at::StorageToVarsMap EscapedLocals;
  SmallPtrSet<const Value *, 4> SeenBases;

  LLVM_DEBUG(errs() << "# Finding caller local variables escaped by callee\n");
  for (const Value *Arg : CB.args()) {
    LLVM_DEBUG(errs() << "INSPECT: " << *Arg << "\n");
    if (!Arg->getType()->isPointerTy()) {
      LLVM_DEBUG(errs() << " | SKIP: Not a pointer\n");
      continue;
    }

    const Instruction *I = dyn_cast<Instruction>(Arg);
    if (!I) {
      LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");
      continue;
    }

    // Walk back to the base storage.
    assert(Arg->getType()->isPtrOrPtrVectorTy());
    APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);
    const AllocaInst *Base = dyn_cast<AllocaInst>(
        Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
    if (!Base) {
      LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");
      continue;
    }

    assert(Base);
    LLVM_DEBUG(errs() << " | BASE: " << *Base << "\n");
    // We only need to process each base address once - skip any duplicates.
    if (!SeenBases.insert(Base).second)
      continue;

    // Find all local variables associated with the backing storage.
    for (auto *DAI : at::getAssignmentMarkers(Base)) {
      // Skip variables from inlined functions - they are not local variables.
      if (DAI->getDebugLoc().getInlinedAt())
        continue;
      LLVM_DEBUG(errs() << " > DEF : " << *DAI << "\n");
      EscapedLocals[Base].insert(at::VarRecord(DAI));
    }
  }
  return EscapedLocals;
}

static void trackInlinedStores(Function::iterator Start,
                               Function::iterator End, const CallBase &CB) {
  LLVM_DEBUG(errs() << "trackInlinedStores into "
                    << Start->getParent()->getName() << " from "
                    << CB.getCalledFunction()->getName() << "\n");
  std::unique_ptr<DataLayout> DL = std::make_unique<DataLayout>(CB.getModule());
  at::trackAssignments(Start, End, collectEscapedLocals(*DL, CB), *DL);
}

/// Update inlined instructions' DIAssignID metadata. We need to do this;
/// otherwise, a function inlined more than once into the same function
/// will cause DIAssignID to be shared by many instructions.
static void fixupAssignments(Function::iterator Start, Function::iterator End) {
  // Map {Old, New} metadata. Not used directly - use GetNewID.
  DenseMap<DIAssignID *, DIAssignID *> Map;
  auto GetNewID = [&Map](Metadata *Old) {
    DIAssignID *OldID = cast<DIAssignID>(Old);
    if (DIAssignID *NewID = Map.lookup(OldID))
      return NewID;
    DIAssignID *NewID = DIAssignID::getDistinct(OldID->getContext());
    Map[OldID] = NewID;
    return NewID;
  };
  // Loop over all the inlined instructions. If we find a DIAssignID
  // attachment or use, replace it with a new version.
  for (auto BBI = Start; BBI != End; ++BBI) {
    for (Instruction &I : *BBI) {
      if (auto *ID = I.getMetadata(LLVMContext::MD_DIAssignID))
        I.setMetadata(LLVMContext::MD_DIAssignID, GetNewID(ID));
      else if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&I))
        DAI->setAssignId(GetNewID(DAI->getAssignID()));
    }
  }
}
#undef DEBUG_TYPE
#define DEBUG_TYPE "inline-function"

/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// the callee's entry block gets the same frequency as the callsite block, and
/// that the relative frequencies of all cloned blocks remain the same after
/// cloning.
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto Entry : VMap) {
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it. When that
      // happens, we want to use the maximum among the original blocks'
      // frequencies.
      uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
      ClonedBBs);
}

/// Update the branch metadata for cloned call instructions.
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
                              const ProfileCount &CalleeEntryCount,
                              const CallBase &TheCall, ProfileSummaryInfo *PSI,
                              BlockFrequencyInfo *CallerBFI) {
  if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
    return;
  auto CallSiteCount =
      PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;
  int64_t CallCount =
      std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
  updateProfileCallee(Callee, -CallCount, &VMap);
}

void llvm::updateProfileCallee(
    Function *Callee, int64_t EntryDelta,
    const ValueMap<const Value *, WeakTrackingVH> *VMap) {
  auto CalleeCount = Callee->getEntryCount();
  if (!CalleeCount)
    return;

  const uint64_t PriorEntryCount = CalleeCount->getCount();

  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count; guard against underflow by clamping the new entry count to 0.
  const uint64_t NewEntryCount =
      (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
          ? 0
          : PriorEntryCount + EntryDelta;

  // A VMap is only provided while inlining; use it to scale the profile
  // weights of the cloned call sites.
  if (VMap) {
    uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
    for (auto Entry : *VMap)
      if (isa<CallInst>(Entry.first))
        if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
          CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
  }

  if (EntryDelta) {
    Callee->setEntryCount(NewEntryCount);

    for (BasicBlock &BB : *Callee)
      // No need to update the callsite if it is pruned during inlining.
      if (!VMap || VMap->count(&BB))
        for (Instruction &I : BB)
          if (CallInst *CI = dyn_cast<CallInst>(&I))
            CI->updateProfWeight(NewEntryCount, PriorEntryCount);
  }
}

/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
/// result is implicitly consumed by a call to retainRV or claimRV immediately
/// after the call. This function inlines the retainRV/claimRV calls.
///
/// There are three cases to consider:
///
/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
/// object in the callee return block, the autoreleaseRV call and the
/// retainRV/claimRV call in the caller cancel out. If the call in the caller
/// is a claimRV call, a call to objc_release is emitted.
///
/// 2. If there is a call in the callee return block that doesn't have operand
/// bundle "clang.arc.attachedcall", the operand bundle on the original call
/// is transferred to the call in the callee.
///
/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller
/// is a retainRV call.
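///
/// Illustrative sketch of case 1 (hypothetical IR): a callee return block
///   call ptr @llvm.objc.autoreleaseReturnValue(ptr %v)   ; result unused
///   ret ptr %v
/// paired with a caller-side retainRV has the autoreleaseRV call erased,
/// since the two operations cancel out; with claimRV, a call to
/// @llvm.objc.release is emitted in its place.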
static void
inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
                           const SmallVectorImpl<ReturnInst *> &Returns) {
  Module *Mod = CB.getModule();
  assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
  bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
       IsUnsafeClaimRV = !IsRetainRV;

  for (auto *RI : Returns) {
    Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
    bool InsertRetainCall = IsRetainRV;
    IRBuilder<> Builder(RI->getContext());

    // Walk backwards through the basic block looking for either a matching
    // autoreleaseRV call or an unannotated call.
    auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
                                      RI->getParent()->rend());
    for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
      // Ignore casts.
      if (isa<CastInst>(I))
        continue;

      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
            !II->hasNUses(0) ||
            objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
          break;

        // If we've found a matching autoreleaseRV call:
        // - If claimRV is attached to the call, insert a call to objc_release
        //   and erase the autoreleaseRV call.
        // - If retainRV is attached to the call, just erase the autoreleaseRV
        //   call.
        if (IsUnsafeClaimRV) {
          Builder.SetInsertPoint(II);
          Function *IFn =
              Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
          Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
          Builder.CreateCall(IFn, BC, "");
        }
        II->eraseFromParent();
        InsertRetainCall = false;
        break;
      }

      auto *CI = dyn_cast<CallInst>(&I);

      if (!CI)
        break;

      if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
          objcarc::hasAttachedCallOpBundle(CI))
        break;

      // If we've found an unannotated call that defines RetOpnd, add a
      // "clang.arc.attachedcall" operand bundle.
      Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
      OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
      auto *NewCall = CallBase::addOperandBundle(
          CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
      NewCall->copyMetadata(*CI);
      CI->replaceAllUsesWith(NewCall);
      CI->eraseFromParent();
      InsertRetainCall = false;
      break;
    }

    if (InsertRetainCall) {
      // The retainRV is attached to the call and we've failed to find a
      // matching autoreleaseRV or an annotated call in the callee. Emit a
      // call to objc_retain.
      Builder.SetInsertPoint(RI);
      Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
      Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
      Builder.CreateCall(IFn, BC, "");
    }
  }
}

/// This function inlines the called function into the basic block of the
/// caller. This returns a failure result if it is not possible to inline this
/// call. The program is still in a well-defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C'
/// now exists in the instruction stream. Similarly this will inline a
/// recursive function by one level.
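///
/// Illustrative sketch of the one-level property (hypothetical functions):
/// when inlining the call to @b in
///   define void @a() { call void @b() ... }
///   define void @b() { call void @c() ... }
/// the body of @a afterwards contains a direct call to @c; @c itself is not
/// inlined transitively by this routine.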
llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                                        bool MergeAttributes,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime,
                                        Function *ForwardVarArgsTo) {
  assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");

  // FIXME: we don't inline callbr yet.
  if (isa<CallBrInst>(CB))
    return InlineResult::failure("We don't inline callbr yet.");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  Function *CalledFunc = CB.getCalledFunction();
  if (!CalledFunc ||               // Can't inline external function or indirect
      CalledFunc->isDeclaration()) // call!
    return InlineResult::failure("external or indirect");

  // The inliner does not know how to inline through calls with operand
  // bundles in general ...
  if (CB.hasOperandBundles()) {
    for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;
      if (Tag == LLVMContext::OB_clang_arc_attachedcall)
        continue;
      if (Tag == LLVMContext::OB_kcfi)
        continue;

      return InlineResult::failure("unsupported operand bundle");
    }
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CB.doesNotThrow();

  BasicBlock *OrigBB = CB.getParent();
  Function *Caller = OrigBB->getParent();

  // Do not inline a strictfp function into a non-strictfp one. It would
  // require conversion of all FP operations in the host function to
  // constrained intrinsics.
  if (CalledFunc->getAttributes().hasFnAttr(Attribute::StrictFP) &&
      !Caller->getAttributes().hasFnAttr(Attribute::StrictFP)) {
    return InlineResult::failure("incompatible strictfp attributes");
  }

  // GC poses two hazards to inlining, which only occur when the callee has
  // GC:
  // 1. If the caller has no GC, then the callee's GC must be propagated to
  //    the caller.
  // 2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return InlineResult::failure("incompatible GC");
  }

  // Get the personality function from the callee if it contains a landing
  // pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function
  // used in the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    // supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return InlineResult::failure("incompatible personality");
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isScopedEHPersonality(Personality)) {
      std::optional<OperandBundleUse> ParentFunclet =
          CB.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal. What about the target function?

      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined
          // into cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad. Let's check the
            // callee for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return InlineResult::failure("catch in cleanup funclet");
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant; there may not be any sort of
          // exceptional funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return InlineResult::failure("SEH in cleanup funclet");
          }
        }
      }
    }
  }

  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to the caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && isa<CallInst>(CB)) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst *, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    struct ByValInit {
      Value *Dst;
      Value *Src;
      Type *Ty;
    };
    // Keep a list of (dst, src, type) triples to emit byval initializations.
    SmallVector<ByValInit, 4> ByValInits;

    // When inlining a function that contains noalias scope metadata,
    // this metadata needs to be cloned so that the inlined blocks
    // have different "unique scopes" at every call site.
    // Track the metadata that must be cloned. Do this before other changes to
    // the function, so that we do not get in trouble when inlining caller ==
    // callee.
    ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

    auto &DL = Caller->getParent()->getDataLayout();

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
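    // Illustrative sketch (hypothetical IR): for a callsite
    //   call void @f(i32 %x, ptr %p)
    // into `define void @f(i32 %a, ptr %b)`, the loop below records
    //   VMap[%a] = %x and VMap[%b] = %p
    // so the cloner replaces every use of a formal argument with the actual
    // value (a byval argument may first be replaced by a fresh alloca).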
    auto AI = CB.arg_begin();
    unsigned ArgNo = 0;
    for (Function::arg_iterator I = CalledFunc->arg_begin(),
                                E = CalledFunc->arg_end();
         I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CB.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
                                        &CB, CalledFunc, IFI,
                                        CalledFunc->getParamAlign(ArgNo));
        if (ActualArg != *AI)
          ByValInits.push_back(
              {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
      }

      VMap[&*I] = ActualArg;
    }

    // TODO: Remove this when users have been updated to the assume bundles.
    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CB, IFI);

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // Preserve all attributes of the call and its parameters.
    salvageKnowledge(&CB, AC);

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll
    // be happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo);
    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Insert retainRV/claimRV runtime calls.
    objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
    if (RVCallKind != objcarc::ARCInstKind::None)
      inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);

    // Update caller/callee profiles only when requested. For sample loader
    // inlining, the context-sensitive inlinee profile doesn't need to be
    // subtracted from the callee profile, and the inlined clone also doesn't
    // need to be scaled based on the call site count.
    if (IFI.UpdateProfile) {
      if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
        // Update the BFI of blocks cloned into the caller.
        updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                        CalledFunc->front());

      if (auto Profile = CalledFunc->getEntryCount())
        updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
                          IFI.CallerBFI);
    }

    // Inject byval argument initializations.
    for (ByValInit &Init : ByValInits)
      HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
                              &*FirstNewBlock, IFI, CalledFunc);

    std::optional<OperandBundleUse> ParentDeopt =
        CB.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
        if (!ICS)
          continue; // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        OpDefs.reserve(ICS->getNumOperandBundles());

        for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles();
             COBi < COBe; ++COBi) {
          auto ChildOB = ICS->getOperandBundleAt(COBi);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets
          // crowded. Prepend the parent's deoptimization continuation to the
          // newly inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
          llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        ICS->replaceAllUsesWith(NewI);

        VH = nullptr;
        ICS->eraseFromParent();
      }
    }

    // For 'nodebug' functions, the associated DISubprogram is always null.
    // Conservatively avoid propagating the callsite debug location to
    // instructions inlined from a function whose DISubprogram is not null.
    fixupLineNumbers(Caller, FirstNewBlock, &CB,
                     CalledFunc->getSubprogram() != nullptr);

    if (isAssignmentTrackingEnabled(*Caller->getParent())) {
      // Interpret inlined stores to caller-local variables as assignments.
      trackInlinedStores(FirstNewBlock, Caller->end(), CB);

      // Update DIAssignID metadata attachments and uses so that they are
      // unique to this inlined instance.
      fixupAssignments(FirstNewBlock, Caller->end());
    }

    // Now clone the inlined noalias scope metadata.
    SAMetadataCloner.clone();
    SAMetadataCloner.remap(FirstNewBlock, Caller->end());

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);

    // Clone return attributes on the callsite into the calls within the
    // inlined function which feed into its return value.
    AddReturnAttributes(CB, VMap);

    propagateMemProfMetadata(CalledFunc, CB,
                             InlinedFunctionInfo.ContainsMemProfMetadata, VMap);

    // Propagate metadata on the callsite if necessary.
    PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock)
          if (auto *II = dyn_cast<AssumeInst>(&I))
            IFI.GetAssumptionCache(*Caller).registerAssumption(II);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
                              E = FirstNewBlock->end();
         I != E;) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI)
        continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) && !cast<AllocaInst>(I)->use_empty() &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
                                     AI->getIterator(), I);
    }
  }

  SmallVector<Value *, 4> VarArgsToForward;
  SmallVector<AttributeSet, 4> VarArgsAttrs;
  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
       i < CB.arg_size(); i++) {
    VarArgsToForward.push_back(CB.getArgOperand(i));
    VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(&CB))
      CallSiteTailKind = CI->getTailCallKind();

    // For inlining purposes, the "notail" marker is the same as no marker.
    if (CallSiteTailKind == CallInst::TCK_NoTail)
      CallSiteTailKind = CallInst::TCK_None;

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : llvm::make_early_inc_range(*BB)) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // Forward varargs from inlined call site to calls to the
        // ForwardVarArgsTo function, if requested, and to musttail calls.
        if (!VarArgsToForward.empty() &&
            ((ForwardVarArgsTo &&
              CI->getCalledFunction() == ForwardVarArgsTo) ||
             CI->isMustTailCall())) {
          // Collect attributes for non-vararg parameters.
          AttributeList Attrs = CI->getAttributes();
          SmallVector<AttributeSet, 8> ArgAttrs;
          if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
            for (unsigned ArgNo = 0;
                 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
              ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
          }

          // Add VarArg attributes.
          ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
          Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
                                     Attrs.getRetAttrs(), ArgAttrs);
          // Add VarArgs to existing parameters.
          SmallVector<Value *, 6> Params(CI->args());
          Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
          CallInst *NewCI = CallInst::Create(
              CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
          NewCI->setDebugLoc(CI->getDebugLoc());
          NewCI->setAttributes(Attrs);
          NewCI->setCallingConv(CI->getCallingConv());
          CI->replaceAllUsesWith(NewCI);
          CI->eraseFromParent();
          CI = NewCI;
        }

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually
        // recursive with musttail, we can inline 'g' into 'f' so long as we
        // preserve musttail on the cloned call to 'f'. If either the inlined
        // call site or the cloned call site is *not* musttail, the program
        // already has one frame of stack growth, so it's safe to remove
        // musttail. Here is a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        //
        // Inlined notail calls should remain notail calls.
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        if (ChildTCK != CallInst::TCK_NoTail)
          ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Call sites inlined through a 'nounwind' call site should be
        // 'nounwind' as well. However, avoid marking call sites explicitly
        // where possible. This helps expose more opportunities for CSE after
        // inlining, commonly when the callee is an intrinsic.
        if (MarkNoUnwind && !CI->doesNotThrow())
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  // We need to insert lifetime intrinsics even at O0 to avoid invalid
  // access caused by multithreaded coroutines. The check
  // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
  if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
      !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];
      // Don't mark swifterror allocas. They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate
      // markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
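      // Illustrative sketch (hypothetical IR): for
      //   %buf = alloca [4 x i32], align 4
      // the markers emitted below are
      //   call void @llvm.lifetime.start.p0(i64 16, ptr %buf)
      //   ...
      //   call void @llvm.lifetime.end.p0(i64 16, ptr %buf)
      // If the size cannot be computed (e.g. a scalable vector type), an
      // unknown size of i64 -1 is used instead.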
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that the array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by the type size.
        if (!AllocaTypeSize.isScalable() &&
            AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
            std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
                AllocaTypeSize.getFixedValue()) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or
        // deoptimize call and a return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in
    // the inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or
      // deoptimize call and a return. The return will restore the stack
      // pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls &&
          RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to
  // rewrite any call instructions into invoke instructions. This is
  // sensitive to which funclet pads were top-level in the inlinee, so must
  // be done before rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the
  // callsite's EHPad.
  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to inlined call sites.
      PropagateOperandBundles(BB, CallSiteEHPad);

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // the caller and we inline it into a call site which doesn't unwind
      // but which sits inside an EH pad that does. Such an edge must be
      // dynamically unreachable. As such, we replace the cleanupret with
      // unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Return
    // set, so that the control flow from those returns does not get merged
    // into the caller (but terminate it instead). If the caller's return
    // type does not match the callee's return type, we also need to change
    // the return type of the intrinsic.
    if (Caller->getReturnType() == CB.getType()) {
      llvm::erase_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->args());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        auto DeoptAttributes = DeoptCall->getAttributes();
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        NewDeoptCall->setAttributes(DeoptAttributes);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }

  // Handle any inlined musttail call sites. In order for a new call site to
  // be musttail, the source of the clone and the inlined call site must have
  // been musttail. Therefore it's safe to return without merging control
  // into the phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // Now that all of the transforms on the inlined code have taken place but
  // before we splice the inlined code into the CFG and lose track of which
  // blocks were actually inlined, collect the call sites. We only do this if
  // call graph updates weren't requested, as those provide value-handle-based
  // tracking of inlined call sites instead. Calls to intrinsics are not
  // collected because they are not inlineable.
  if (InlinedFunctionInfo.ContainsCalls) {
    // Otherwise just collect the raw call sites that were inlined.
    for (BasicBlock &NewBB :
         make_range(FirstNewBlock->getIterator(), Caller->end()))
      for (Instruction &I : NewBB)
        if (auto *CB = dyn_cast<CallBase>(&I))
          if (!(CB->getCalledFunction() &&
                CB->getCalledFunction()->isIntrinsic()))
            IFI.InlinedCallSites.push_back(CB);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly
  // into the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
                   FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->back().eraseFromParent();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!CB.use_empty()) {
      ReturnInst *R = Returns[0];
      if (&CB == R->getReturnValue())
        CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    CB.eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    if (MergeAttributes)
      AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);

    // We are now done with the inlining.
    return InlineResult::success();
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {

    // Add an unconditional branch to make this look like the CallInst
    // case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);

    // Split the basic block. This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else { // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  if (IFI.CallerBFI) {
    // Copy original BB's block frequency to AfterCallBB.
    IFI.CallerBFI->setBlockFreq(
        AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  Instruction *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,
                 Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge
    // all possible incoming values.
    if (!CB.use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use
      // the PHI node as their operand.
      CB.replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI
    // node as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!CB.use_empty()) {
      if (&CB == Returns[0]->getReturnValue())
        CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will
    // return to, which contains the code that was after the call.
    AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Now delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!CB.use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  CB.eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_empty(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into
  // the single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
  OrigBB->splice(Br->getIterator(), CalleeEntry);

  // Remove the unconditional branch.
  Br->eraseFromParent();

  // Now we can remove the CalleeEntry block, which is now empty.
  CalleeEntry->eraseFromParent();

  // If we inserted a phi node, check to see if it has a single value (e.g.
  // all the entries are the same or undef). If so, remove the PHI so it
  // doesn't block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  if (MergeAttributes)
    AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);

  return InlineResult::success();
}