//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
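
// A minimal usage sketch (illustrative only; the authoritative entry points
// are declared in llvm/Transforms/Utils/Cloning.h, and the exact parameter
// list varies between revisions):
//
//   InlineFunctionInfo IFI;
//   if (auto *CB = dyn_cast<CallBase>(&I))
//     if (InlineFunction(*CB, IFI).isSuccess())
//       ; // The call site has been replaced by the callee's body.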

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(false), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {

  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
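///
/// Schematically (IR names hypothetical), an inlined
///
///   resume { ptr, i32 } %exn
///
/// becomes an unconditional 'br label %lpad.body' into the split landing pad
/// body, with %exn added as an incoming value of the inner exception-value
/// PHI created by getInnerResumeDest().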
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap.  When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this.  We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad.  Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup.  In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad.  Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
/// return that pad instruction.  If it unwinds to caller, return
/// ConstantTokenNone.  If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke.  Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer.  Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees.  The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
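///
/// As a schematic example (labels hypothetical), given
///
///   %cs = catchswitch within none [label %handler] unwind label %outer
///
/// a query on %cs (or one of its catchpads) yields the first non-PHI
/// instruction of %outer, while a cleanuppad whose cleanupret unwinds to
/// caller yields ConstantTokenNone.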
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants.  An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information.  Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors.  If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from.  So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below.  Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad.  This local unwind
      // gives us no information about EHPad.  Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad.  If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here).  Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any such calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge, returning the
/// block so the caller can fill in the PHI nodes of the unwind destination.
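///
/// Schematically (block names hypothetical), a potentially-throwing
///
///   call void @callee()
///
/// in the inlined region is rewritten, splitting its block, as
///
///   invoke void @callee()
///           to label %split.cont unwind label %unwind.edge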
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (Instruction &I : llvm::make_early_inc_range(*BB)) {
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(&I);

    if (!CI || CI->doesNotThrow())
      continue;

    if (CI->isInlineAsm()) {
      InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
      if (!IA->canThrow()) {
        continue;
      }
    }

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes.  The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental_deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet.  If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB.  Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects.  So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
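///
/// For example (schematic IR), if the caller's landing pad is
///
///   %outer = landingpad { ptr, i32 } catch ptr @ti cleanup
///
/// then each landingpad inlined from the callee gets "catch ptr @ti"
/// appended to its own clause list and is additionally marked as a cleanup.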
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.  This variant handles
/// callers that use funclet-based EH pads rather than landingpads.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (PHINode &PHI : UnwindDest->phis()) {
    // Save the value to use for this edge.
    UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet.  If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB.  Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects.  So when we see such a catchswitch,
          // leave it as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee.  It may or may not have a
          // descendant that definitively has an unwind to caller.  In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map.  This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
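///
/// Schematically (metadata numbering illustrative), if the call site carries
/// !noalias !0, then a cloned load
///
///   %v = load i32, ptr %p, !noalias !1
///
/// is retagged with the concatenation of !1 and !0, and likewise for the
/// other three metadata kinds.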
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}

/// Add a "funclet" operand bundle naming CallSiteEHPad to inlined call sites
/// that do not already carry one, so the cloned calls remain nested in the
/// caller's funclet.
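///
/// Schematically (token name hypothetical), a cloned
///
///   call void @g()
///
/// becomes
///
///   call void @g() [ "funclet"(token %callsite.pad) ]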
static void PropagateOperandBundles(Function::iterator InlinedBB,
                                    Instruction *CallSiteEHPad) {
  for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
    CallBase *I = dyn_cast<CallBase>(&II);
    if (!I)
      continue;
    // Skip call sites which already have a "funclet" bundle.
    if (I->getOperandBundle(LLVMContext::OB_funclet))
      continue;
    // Skip call sites which are nounwind intrinsics (as long as they don't
    // lower into regular function calls in the course of IR transformations).
    auto *CalledFn =
        dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
    if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
        !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
      continue;

    SmallVector<OperandBundleDef, 1> OpBundles;
    I->getOperandBundlesAsDefs(OpBundles);
    OpBundles.emplace_back("funclet", CallSiteEHPad);

    Instruction *NewInst = CallBase::Create(I, OpBundles, I);
    NewInst->takeName(I);
    I->replaceAllUsesWith(NewInst);
    I->eraseFromParent();
  }
}

namespace {
/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of the
/// metadata, putting the two versions in separate scope domains.
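///
/// For example (node numbering illustrative): if the callee tags accesses
/// with !alias.scope !0, where !0 = !{!1} and !1 names a scope in some
/// domain, the cloner builds parallel nodes !0' and !1', and remap() points
/// the inlined instructions at the primed copies, so the two copies of the
/// region end up in distinct scope domains.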
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};
} // namespace

ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}

void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}

void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}

void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // TODO: The null checks for the MDMap.lookup() results should no longer
      // be necessary.
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
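///
/// Schematically (metadata numbering illustrative), after inlining a call to
/// @callee(ptr noalias %a, ptr %b), cloned accesses end up tagged roughly as
///
///   store i32 0, ptr %a.i, !alias.scope !3   ; based on the noalias arg
///   %v = load i32, ptr %b.i, !noalias !3     ; provably not based on it
///
/// where !3 names the fresh scope created for %a below.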
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);

          // We'll retain this knowledge without additional metadata.
          if (AAResults::onlyAccessesInaccessibleMem(MRB))
            continue;

          if (AAResults::onlyAccessesArgPointees(MRB))
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // Only care about pointer arguments. If a noalias argument is
          // accessed through a non-pointer argument, it must be captured
          // first (e.g. via ptrtoint), and we protect against captures below.
          if (!Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
           UsesUnknownObject = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        if (isEscapeSource(V)) {
          // An escape source can only alias with a noalias argument if it has
          // been captured beforehand.
          RequiresNoCaptureBefore = true;
        } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
          // If this is neither an escape source, nor some identified object
          // (which cannot directly alias a noalias argument), nor some other
          // argument (which, by definition, also cannot alias a noalias
          // argument), conservatively do not make any assumptions.
          UsesUnknownObject = true;
        }
      }

      // Nothing we can do if the used underlying object cannot be reliably
      // determined.
      if (UsesUnknownObject)
        continue;

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        RequiresNoCaptureBefore = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (ObjSet.contains(A))
          continue; // May be based on a noalias argument.

        // It might be tempting to skip the PointerMayBeCapturedBefore check if
        // A->hasNoCaptureAttr() is true, but this is incorrect because
        // nocapture only guarantees that no copies outlive the function, not
        // that the value cannot be locally captured.
        if (!RequiresNoCaptureBefore ||
            !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
                                        /* StoreCaptures */ false, I, &DT))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {

  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  return !llvm::isGuaranteedToTransferExecutionToSuccessor(
      Begin->getIterator(), End->getIterator(), InlinerAttributeWindow + 1);
}

static AttrBuilder IdentifyValidAttributes(CallBase &CB) {

  AttrBuilder AB(CB.getContext(), CB.getAttributes().getRetAttrs());
  if (!AB.hasAttributes())
    return AB;
  AttrBuilder Valid(CB.getContext());
1226   // Only allow these whitelisted attributes to be propagated back to the
1227   // callee. This is because other attributes may only be valid on the call
1228   // itself, i.e. attributes such as signext and zeroext.
1229   if (auto DerefBytes = AB.getDereferenceableBytes())
1230     Valid.addDereferenceableAttr(DerefBytes);
1231   if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
1232     Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1233   if (AB.contains(Attribute::NoAlias))
1234     Valid.addAttribute(Attribute::NoAlias);
1235   if (AB.contains(Attribute::NonNull))
1236     Valid.addAttribute(Attribute::NonNull);
1237   return Valid;
1238 }
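
     // For illustration (a hedged sketch): for a call site
     //   %v = call nonnull dereferenceable(8) i8* @f()
     // Valid would hold nonnull and dereferenceable(8), whereas a call-specific
     // attribute such as zeroext on an integer-returning call would be dropped.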
1239 
1240 static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
1241   if (!UpdateReturnAttributes)
1242     return;
1243 
1244   AttrBuilder Valid = IdentifyValidAttributes(CB);
1245   if (!Valid.hasAttributes())
1246     return;
1247   auto *CalledFunction = CB.getCalledFunction();
1248   auto &Context = CalledFunction->getContext();
1249 
1250   for (auto &BB : *CalledFunction) {
1251     auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1252     if (!RI || !isa<CallBase>(RI->getOperand(0)))
1253       continue;
1254     auto *RetVal = cast<CallBase>(RI->getOperand(0));
1255     // Check that the cloned RetVal exists and is a call, otherwise we cannot
1256     // add the attributes on the cloned RetVal. Simplification during inlining
1257     // could have transformed the cloned instruction.
1258     auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1259     if (!NewRetVal)
1260       continue;
1261     // Backward propagation of attributes to the returned value may be incorrect
1262     // if it is control flow dependent.
1263     // Consider:
1264     // @callee {
1265     //  %rv = call @foo()
1266     //  %rv2 = call @bar()
1267     //  if (%rv2 != null)
1268     //    return %rv2
1269     //  if (%rv == null)
1270     //    exit()
1271     //  return %rv
1272     // }
1273     // caller() {
1274     //   %val = call nonnull @callee()
1275     // }
1276     // Here we cannot add the nonnull attribute on either foo or bar. So, we
1277     // only propagate the attributes when RetVal and RI are in the same basic
1278     // block and there are no throwing/exiting instructions between them.
1279     if (RI->getParent() != RetVal->getParent() ||
1280         MayContainThrowingOrExitingCall(RetVal, RI))
1281       continue;
1282     // Add to the existing attributes of NewRetVal, i.e. the cloned call
1283     // instruction.
1284     // NB! When we have the same attribute already existing on NewRetVal, but
1285     // with a differing value, the AttributeList's merge API honours the already
1286     // existing attribute value (i.e. attributes such as dereferenceable,
1287     // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
1288     AttributeList AL = NewRetVal->getAttributes();
1289     AttributeList NewAL = AL.addRetAttributes(Context, Valid);
1290     NewRetVal->setAttributes(NewAL);
1291   }
1292 }
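
     // A minimal sketch of the effect (IR names are illustrative): if the
     // callee body ends in
     //   %rv = call i8* @foo()
     //   ret i8* %rv
     // and the call site is `%val = call nonnull i8* @callee()`, the cloned
     // call in the caller becomes `%rv.i = call nonnull i8* @foo()`, because
     // %rv reaches the return in the same block with nothing throwing or
     // exiting in between.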
1293 
1294 /// If the inlined function has non-byval align arguments, then
1295 /// add @llvm.assume-based alignment assumptions to preserve this information.
1296 static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
1297   if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1298     return;
1299 
1300   AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
1301   auto &DL = CB.getCaller()->getParent()->getDataLayout();
1302 
1303   // To avoid inserting redundant assumptions, we should check for assumptions
1304   // already in the caller. Doing so may require a DominatorTree of the caller.
1305   DominatorTree DT;
1306   bool DTCalculated = false;
1307 
1308   Function *CalledFunc = CB.getCalledFunction();
1309   for (Argument &Arg : CalledFunc->args()) {
1310     unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1311     if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
1312       if (!DTCalculated) {
1313         DT.recalculate(*CB.getCaller());
1314         DTCalculated = true;
1315       }
1316 
1317       // If we can already prove the asserted alignment in the context of the
1318       // caller, then don't bother inserting the assumption.
1319       Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1320       if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
1321         continue;
1322 
1323       CallInst *NewAsmp =
1324           IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
1325       AC->registerAssumption(cast<AssumeInst>(NewAsmp));
1326     }
1327   }
1328 }
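
     // A sketch of the emitted assumption, assuming the operand-bundle form of
     // llvm.assume is in use: for `define void @callee(i8* align 32 %p)`
     // inlined at `call void @callee(i8* %q)`, the caller gains
     //   call void @llvm.assume(i1 true) [ "align"(i8* %q, i64 32) ]
     // unless %q can already be proven 32-byte aligned at the call site.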
1329 
1330 /// Once we have cloned code over from a callee into the caller,
1331 /// update the specified callgraph to reflect the changes we made.
1332 /// Note that it's possible that not all code was copied over, so only
1333 /// some edges of the callgraph may remain.
1334 static void UpdateCallGraphAfterInlining(CallBase &CB,
1335                                          Function::iterator FirstNewBlock,
1336                                          ValueToValueMapTy &VMap,
1337                                          InlineFunctionInfo &IFI) {
1338   CallGraph &CG = *IFI.CG;
1339   const Function *Caller = CB.getCaller();
1340   const Function *Callee = CB.getCalledFunction();
1341   CallGraphNode *CalleeNode = CG[Callee];
1342   CallGraphNode *CallerNode = CG[Caller];
1343 
1344   // Since the callee's still-uninlined call sites were cloned into the caller,
1345   // add edges from the caller to all of the callees of the callee.
1346   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1347 
1348   // Consider the case where CalleeNode == CallerNode.
1349   CallGraphNode::CalledFunctionsVector CallCache;
1350   if (CalleeNode == CallerNode) {
1351     CallCache.assign(I, E);
1352     I = CallCache.begin();
1353     E = CallCache.end();
1354   }
1355 
1356   for (; I != E; ++I) {
1357     // Skip 'reference' call records.
1358     if (!I->first)
1359       continue;
1360 
1361     const Value *OrigCall = *I->first;
1362 
1363     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1364     // Only copy the edge if the call was inlined!
1365     if (VMI == VMap.end() || VMI->second == nullptr)
1366       continue;
1367 
1368     // If the call was inlined, but then constant folded, there is no edge to
1369     // add.  Check for this case.
1370     auto *NewCall = dyn_cast<CallBase>(VMI->second);
1371     if (!NewCall)
1372       continue;
1373 
1374     // We do not treat intrinsic calls like real function calls because we
1375     // expect them to become inline code; do not add an edge for an intrinsic.
1376     if (NewCall->getCalledFunction() &&
1377         NewCall->getCalledFunction()->isIntrinsic())
1378       continue;
1379 
1380     // Remember that this call site got inlined for the client of
1381     // InlineFunction.
1382     IFI.InlinedCalls.push_back(NewCall);
1383 
1384     // It's possible that inlining the callsite will cause it to go from an
1385     // indirect to a direct call by resolving a function pointer.  If this
1386     // happens, set the callee of the new call site to a more precise
1387     // destination.  This can also happen if the call graph node of the caller
1388     // was just unnecessarily imprecise.
1389     if (!I->second->getFunction())
1390       if (Function *F = NewCall->getCalledFunction()) {
1391         // Indirect call site resolved to direct call.
1392         CallerNode->addCalledFunction(NewCall, CG[F]);
1393 
1394         continue;
1395       }
1396 
1397     CallerNode->addCalledFunction(NewCall, I->second);
1398   }
1399 
1400   // Update the call graph by deleting the edge from Callee to Caller.  We must
1401   // do this after the loop above in case Caller and Callee are the same.
1402   CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
1403 }
1404 
1405 static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
1406                                     Module *M, BasicBlock *InsertBlock,
1407                                     InlineFunctionInfo &IFI) {
1408   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1409 
1410   Value *Size =
1411       Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1412 
1413   // Always generate a memcpy of alignment 1 here because we don't know
1414   // the alignment of the src pointer.  Other optimizations can infer
1415   // better alignment.
1416   Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1417                        /*SrcAlign*/ Align(1), Size);
1418 }
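
     // A sketch of the generated initialization, assuming a byval type
     // %struct.S with a store size of 24 bytes (names are illustrative):
     //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst,
     //                                        i8* align 1 %src, i64 24, i1 false)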
1419 
1420 /// When inlining a call site that has a byval argument,
1421 /// we have to make the implicit memcpy explicit by adding it.
1422 static Value *HandleByValArgument(Type *ByValType, Value *Arg,
1423                                   Instruction *TheCall,
1424                                   const Function *CalledFunc,
1425                                   InlineFunctionInfo &IFI,
1426                                   unsigned ByValAlignment) {
1427   assert(cast<PointerType>(Arg->getType())
1428              ->isOpaqueOrPointeeTypeMatches(ByValType));
1429   Function *Caller = TheCall->getFunction();
1430   const DataLayout &DL = Caller->getParent()->getDataLayout();
1431 
1432   // If the called function is readonly, then it could not mutate the caller's
1433   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1434   // temporary.
1435   if (CalledFunc->onlyReadsMemory()) {
1436     // If the byval argument has a specified alignment that is greater than the
1437     // passed in pointer, then we either have to round up the input pointer or
1438     // give up on this transformation.
1439     if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
1440       return Arg;
1441 
1442     AssumptionCache *AC =
1443         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1444 
1445     // If the pointer is already known to be sufficiently aligned, or if we can
1446     // round it up to a larger alignment, then we don't need a temporary.
1447     if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
1448                                    AC) >= ByValAlignment)
1449       return Arg;
1450 
1451     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1452     // for code quality, but rarely happens and is required for correctness.
1453   }
1454 
1455   // Create the alloca, using the preferred alignment for the type.
1456   Align Alignment(DL.getPrefTypeAlignment(ByValType));
1457 
1458   // If the byval had an alignment specified, we *must* use at least that
1459   // alignment, as it is required by the byval argument (and uses of the
1460   // pointer inside the callee).
1461   if (ByValAlignment > 0)
1462     Alignment = std::max(Alignment, Align(ByValAlignment));
1463 
1464   Value *NewAlloca =
1465       new AllocaInst(ByValType, DL.getAllocaAddrSpace(), nullptr, Alignment,
1466                      Arg->getName(), &*Caller->begin()->begin());
1467   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1468 
1469   // Uses of the argument in the function should use our new alloca
1470   // instead.
1471   return NewAlloca;
1472 }
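
     // Putting the pieces together (a hedged sketch; names are illustrative):
     // for `call void @callee(%struct.S* byval(%struct.S) align 8 %arg)` into
     // a callee that may write to the struct, the caller's entry block gains
     //   %arg.copy = alloca %struct.S, align 8
     // the inlined body then uses %arg.copy, and HandleByValArgumentInit emits
     // the memcpy from %arg into %arg.copy at the start of the inlined code.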
1473 
1474 // Check whether this Value is used by a lifetime intrinsic.
1475 static bool isUsedByLifetimeMarker(Value *V) {
1476   for (User *U : V->users())
1477     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1478       if (II->isLifetimeStartOrEnd())
1479         return true;
1480   return false;
1481 }
1482 
1483 // Check whether the given alloca already has
1484 // lifetime.start or lifetime.end intrinsics.
1485 static bool hasLifetimeMarkers(AllocaInst *AI) {
1486   Type *Ty = AI->getType();
1487   Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1488                                        Ty->getPointerAddressSpace());
1489   if (Ty == Int8PtrTy)
1490     return isUsedByLifetimeMarker(AI);
1491 
1492   // Do a scan to find all the casts to i8*.
1493   for (User *U : AI->users()) {
1494     if (U->getType() != Int8PtrTy) continue;
1495     if (U->stripPointerCasts() != AI) continue;
1496     if (isUsedByLifetimeMarker(U))
1497       return true;
1498   }
1499   return false;
1500 }
1501 
1502 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1503 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1504 /// cannot be static.
1505 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1506   return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1507 }
1508 
1509 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1510 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1511 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1512                                LLVMContext &Ctx,
1513                                DenseMap<const MDNode *, MDNode *> &IANodes) {
1514   auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1515   return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
1516                          OrigDL.getScope(), IA);
1517 }
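
     // For example, if OrigDL is `!DILocation(line: 5, scope: !7)` and
     // InlinedAt describes the call site at line 42, the result is a line-5
     // location whose inlinedAt chain now terminates at the line-42 call site.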
1518 
1519 /// Update inlined instructions' line numbers to encode the location where
1520 /// these instructions are inlined.
1521 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1522                              Instruction *TheCall, bool CalleeHasDebugInfo) {
1523   const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1524   if (!TheCallDL)
1525     return;
1526 
1527   auto &Ctx = Fn->getContext();
1528   DILocation *InlinedAtNode = TheCallDL;
1529 
1530   // Create a unique call site, not to be confused with any other call from the
1531   // same location.
1532   InlinedAtNode = DILocation::getDistinct(
1533       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1534       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1535 
1536   // Cache the inlined-at nodes as they're built so they are reused; without
1537   // this, every instruction's inlined-at chain would become distinct from
1538   // every other's.
1539   DenseMap<const MDNode *, MDNode *> IANodes;
1540 
1541   // Check if we are not generating inline line tables and want to use
1542   // the call site location instead.
1543   bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1544 
1545   for (; FI != Fn->end(); ++FI) {
1546     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1547          BI != BE; ++BI) {
1548       // Loop metadata needs to be updated so that the start and end locs
1549       // reference inlined-at locations.
1550       auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1551                                 &IANodes](Metadata *MD) -> Metadata * {
1552         if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
1553           return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
1554         return MD;
1555       };
1556       updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1557 
1558       if (!NoInlineLineTables)
1559         if (DebugLoc DL = BI->getDebugLoc()) {
1560           DebugLoc IDL =
1561               inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1562           BI->setDebugLoc(IDL);
1563           continue;
1564         }
1565 
1566       if (CalleeHasDebugInfo && !NoInlineLineTables)
1567         continue;
1568 
1569       // If the inlined instruction has no line number, or if inline info
1570       // is not being generated, make it look as if it originates from the call
1571       // location. This is important for ((__always_inline, __nodebug__))
1572       // functions which must use caller location for all instructions in their
1573       // function body.
1574 
1575       // Don't update static allocas, as they may get moved later.
1576       if (auto *AI = dyn_cast<AllocaInst>(BI))
1577         if (allocaWouldBeStaticInEntry(AI))
1578           continue;
1579 
1580       BI->setDebugLoc(TheCallDL);
1581     }
1582 
1583     // Remove debug info intrinsics if we're not keeping inline info.
1584     if (NoInlineLineTables) {
1585       BasicBlock::iterator BI = FI->begin();
1586       while (BI != FI->end()) {
1587         if (isa<DbgInfoIntrinsic>(BI)) {
1588           BI = BI->eraseFromParent();
1589           continue;
1590         }
1591         ++BI;
1592       }
1593     }
1594 
1595   }
1596 }
1597 
1598 /// Update the block frequencies of the caller after a callee has been inlined.
1599 ///
1600 /// Each block cloned into the caller has its block frequency scaled by the
1601 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1602 /// callee's entry block gets the same frequency as the callsite block and the
1603 /// relative frequencies of all cloned blocks remain the same after cloning.
1604 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1605                             const ValueToValueMapTy &VMap,
1606                             BlockFrequencyInfo *CallerBFI,
1607                             BlockFrequencyInfo *CalleeBFI,
1608                             const BasicBlock &CalleeEntryBlock) {
1609   SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1610   for (auto Entry : VMap) {
1611     if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1612       continue;
1613     auto *OrigBB = cast<BasicBlock>(Entry.first);
1614     auto *ClonedBB = cast<BasicBlock>(Entry.second);
1615     uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1616     if (!ClonedBBs.insert(ClonedBB).second) {
1617       // Multiple blocks in the callee might get mapped to one cloned block in
1618       // the caller since we prune the callee as we clone it. When that happens,
1619       // we want to use the maximum among the original blocks' frequencies.
1620       uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1621       if (NewFreq > Freq)
1622         Freq = NewFreq;
1623     }
1624     CallerBFI->setBlockFreq(ClonedBB, Freq);
1625   }
1626   BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1627   CallerBFI->setBlockFreqAndScale(
1628       EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1629       ClonedBBs);
1630 }
1631 
1632 /// Update the branch metadata for cloned call instructions.
1633 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1634                               const ProfileCount &CalleeEntryCount,
1635                               const CallBase &TheCall, ProfileSummaryInfo *PSI,
1636                               BlockFrequencyInfo *CallerBFI) {
1637   if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
1638     return;
1639   auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1640   int64_t CallCount =
1641       std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
1642   updateProfileCallee(Callee, -CallCount, &VMap);
1643 }
1644 
1645 void llvm::updateProfileCallee(
1646     Function *Callee, int64_t EntryDelta,
1647     const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1648   auto CalleeCount = Callee->getEntryCount();
1649   if (!CalleeCount)
1650     return;
1651 
1652   const uint64_t PriorEntryCount = CalleeCount->getCount();
1653 
1654   // Since CallSiteCount is an estimate, it could exceed the original callee
1655   // count; clamp the new count to 0 in that case to guard against underflow.
1656   const uint64_t NewEntryCount =
1657       (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
1658           ? 0
1659           : PriorEntryCount + EntryDelta;
1660 
1661   // Are we updating the profile during inlining (i.e. was a VMap provided)?
1662   if (VMap) {
1663     uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
1664     for (auto Entry : *VMap)
1665       if (isa<CallInst>(Entry.first))
1666         if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1667           CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
1668   }
1669 
1670   if (EntryDelta) {
1671     Callee->setEntryCount(NewEntryCount);
1672 
1673     for (BasicBlock &BB : *Callee)
1674       // No need to update the callsite if it is pruned during inlining.
1675       if (!VMap || VMap->count(&BB))
1676         for (Instruction &I : BB)
1677           if (CallInst *CI = dyn_cast<CallInst>(&I))
1678             CI->updateProfWeight(NewEntryCount, PriorEntryCount);
1679   }
1680 }
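
     // A worked example with illustrative numbers: if the callee's entry count
     // is 100 and the inlined call site is estimated to execute 60 times, then
     // EntryDelta is -60, the callee's entry count drops to 40, calls cloned
     // into the caller scale their !prof weights by 60/100, and calls left in
     // the callee scale theirs by 40/100.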
1681 
1682 /// An operand bundle "clang.arc.attachedcall" on a call indicates the call
1683 /// result is implicitly consumed by a call to retainRV or claimRV immediately
1684 /// after the call. This function inlines the retainRV/claimRV calls.
1685 ///
1686 /// There are three cases to consider:
1687 ///
1688 /// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
1689 ///    object in the callee return block, the autoreleaseRV call and the
1690 ///    retainRV/claimRV call in the caller cancel out. If the call in the caller
1691 ///    is a claimRV call, a call to objc_release is emitted.
1692 ///
1693 /// 2. If there is a call in the callee return block that doesn't have operand
1694 ///    bundle "clang.arc.attachedcall", the operand bundle on the original call
1695 ///    is transferred to the call in the callee.
1696 ///
1697 /// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
1698 ///    a retainRV call.
1699 static void
1700 inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
1701                            const SmallVectorImpl<ReturnInst *> &Returns) {
1702   Module *Mod = CB.getModule();
1703   assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
1704   bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
1705        IsUnsafeClaimRV = !IsRetainRV;
1706 
1707   for (auto *RI : Returns) {
1708     Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
1709     bool InsertRetainCall = IsRetainRV;
1710     IRBuilder<> Builder(RI->getContext());
1711 
1712     // Walk backwards through the basic block looking for either a matching
1713     // autoreleaseRV call or an unannotated call.
1714     auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
1715                                       RI->getParent()->rend());
1716     for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
1717       // Ignore casts.
1718       if (isa<CastInst>(I))
1719         continue;
1720 
1721       if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
1722         if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
1723             !II->hasNUses(0) ||
1724             objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
1725           break;
1726 
1727         // If we've found a matching autoreleaseRV call:
1728         // - If claimRV is attached to the call, insert a call to objc_release
1729         //   and erase the autoreleaseRV call.
1730         // - If retainRV is attached to the call, just erase the autoreleaseRV
1731         //   call.
1732         if (IsUnsafeClaimRV) {
1733           Builder.SetInsertPoint(II);
1734           Function *IFn =
1735               Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
1736           Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
1737           Builder.CreateCall(IFn, BC, "");
1738         }
1739         II->eraseFromParent();
1740         InsertRetainCall = false;
1741         break;
1742       }
1743 
1744       auto *CI = dyn_cast<CallInst>(&I);
1745 
1746       if (!CI)
1747         break;
1748 
1749       if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
1750           objcarc::hasAttachedCallOpBundle(CI))
1751         break;
1752 
1753       // If we've found an unannotated call that defines RetOpnd, add a
1754       // "clang.arc.attachedcall" operand bundle.
1755       Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
1756       OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
1757       auto *NewCall = CallBase::addOperandBundle(
1758           CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI);
1759       NewCall->copyMetadata(*CI);
1760       CI->replaceAllUsesWith(NewCall);
1761       CI->eraseFromParent();
1762       InsertRetainCall = false;
1763       break;
1764     }
1765 
1766     if (InsertRetainCall) {
1767       // The retainRV is attached to the call and we've failed to find a
1768       // matching autoreleaseRV or an annotated call in the callee. Emit a call
1769       // to objc_retain.
1770       Builder.SetInsertPoint(RI);
1771       Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
1772       Value *BC = Builder.CreateBitCast(RetOpnd, IFn->getArg(0)->getType());
1773       Builder.CreateCall(IFn, BC, "");
1774     }
1775   }
1776 }
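
     // A sketch of case 1 above (IR names are illustrative): when the callee's
     // return block ends in
     //   %r = call i8* @foo()
     //   %unused = call i8* @llvm.objc.autoreleaseReturnValue(i8* %r)
     //   ret i8* %r
     // and the call site carries a retainRV "clang.arc.attachedcall" bundle,
     // the autoreleaseReturnValue call is simply erased; for claimRV, a call
     // to @llvm.objc.release on %r is emitted in its place.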
1777 
1778 /// This function inlines the called function into the basic block of the
1779 /// caller. This returns false if it is not possible to inline this call.
1780 /// The program is still in a well-defined state if this occurs, though.
1781 ///
1782 /// Note that this only does one level of inlining.  For example, if the
1783 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1784 /// exists in the instruction stream.  Similarly this will inline a recursive
1785 /// function by one level.
1786 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
1787                                         AAResults *CalleeAAR,
1788                                         bool InsertLifetime,
1789                                         Function *ForwardVarArgsTo) {
1790   assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
1791 
1792   // FIXME: we don't inline callbr yet.
1793   if (isa<CallBrInst>(CB))
1794     return InlineResult::failure("We don't inline callbr yet.");
1795 
1796   // If IFI has any state in it, zap it before we fill it in.
1797   IFI.reset();
1798 
1799   Function *CalledFunc = CB.getCalledFunction();
1800   if (!CalledFunc ||               // Can't inline external function or indirect
1801       CalledFunc->isDeclaration()) // call!
1802     return InlineResult::failure("external or indirect");
1803 
1804   // The inliner does not know how to inline through calls with operand bundles
1805   // in general ...
1806   if (CB.hasOperandBundles()) {
1807     for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
1808       uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
1809       // ... but it knows how to inline through "deopt" operand bundles ...
1810       if (Tag == LLVMContext::OB_deopt)
1811         continue;
1812       // ... and "funclet" operand bundles.
1813       if (Tag == LLVMContext::OB_funclet)
1814         continue;
1815       if (Tag == LLVMContext::OB_clang_arc_attachedcall)
1816         continue;
1817 
1818       return InlineResult::failure("unsupported operand bundle");
1819     }
1820   }
1821 
1822   // If the call to the callee cannot throw, set the 'nounwind' flag on any
1823   // calls that we inline.
1824   bool MarkNoUnwind = CB.doesNotThrow();
1825 
1826   BasicBlock *OrigBB = CB.getParent();
1827   Function *Caller = OrigBB->getParent();
1828 
1829   // Do not inline a strictfp function into a non-strictfp one. It would require
1830   // converting all FP operations in the caller to constrained intrinsics.
1831   if (CalledFunc->getAttributes().hasFnAttr(Attribute::StrictFP) &&
1832       !Caller->getAttributes().hasFnAttr(Attribute::StrictFP)) {
1833     return InlineResult::failure("incompatible strictfp attributes");
1834   }
1835 
1836   // GC poses two hazards to inlining, which only occur when the callee has GC:
1837   //  1. If the caller has no GC, then the callee's GC must be propagated to the
1838   //     caller.
1839   //  2. If the caller has a differing GC, it is invalid to inline.
1840   if (CalledFunc->hasGC()) {
1841     if (!Caller->hasGC())
1842       Caller->setGC(CalledFunc->getGC());
1843     else if (CalledFunc->getGC() != Caller->getGC())
1844       return InlineResult::failure("incompatible GC");
1845   }
1846 
1847   // Get the personality function from the callee if it contains a landing pad.
1848   Constant *CalledPersonality =
1849       CalledFunc->hasPersonalityFn()
1850           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1851           : nullptr;
1852 
1853   // Find the personality function used by the landing pads of the caller. If it
1854   // exists, then check to see that it matches the personality function used in
1855   // the callee.
1856   Constant *CallerPersonality =
1857       Caller->hasPersonalityFn()
1858           ? Caller->getPersonalityFn()->stripPointerCasts()
1859           : nullptr;
1860   if (CalledPersonality) {
1861     if (!CallerPersonality)
1862       Caller->setPersonalityFn(CalledPersonality);
1863     // If the personality functions match, then we can perform the
1864     // inlining. Otherwise, we can't inline.
1865     // TODO: This isn't 100% true. Some personality functions are proper
1866     //       supersets of others and can be used in place of the other.
1867     else if (CalledPersonality != CallerPersonality)
1868       return InlineResult::failure("incompatible personality");
1869   }
1870 
1871   // We need to figure out which funclet the callsite was in so that we may
1872   // properly nest the callee.
1873   Instruction *CallSiteEHPad = nullptr;
1874   if (CallerPersonality) {
1875     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1876     if (isScopedEHPersonality(Personality)) {
1877       Optional<OperandBundleUse> ParentFunclet =
1878           CB.getOperandBundle(LLVMContext::OB_funclet);
1879       if (ParentFunclet)
1880         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1881 
1882       // OK, the inlining site is legal.  What about the target function?
1883 
1884       if (CallSiteEHPad) {
1885         if (Personality == EHPersonality::MSVC_CXX) {
1886           // The MSVC personality cannot tolerate catches getting inlined into
1887           // cleanup funclets.
1888           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1889             // Ok, the call site is within a cleanuppad.  Let's check the callee
1890             // for catchpads.
1891             for (const BasicBlock &CalledBB : *CalledFunc) {
1892               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1893                 return InlineResult::failure("catch in cleanup funclet");
1894             }
1895           }
1896         } else if (isAsynchronousEHPersonality(Personality)) {
1897           // SEH is even less tolerant; there may not be any sort of exceptional
1898           // funclet in the callee.
1899           for (const BasicBlock &CalledBB : *CalledFunc) {
1900             if (CalledBB.isEHPad())
1901               return InlineResult::failure("SEH in cleanup funclet");
1902           }
1903         }
1904       }
1905     }
1906   }
1907 
1908   // Determine if we are dealing with a call in an EHPad which does not unwind
1909   // to caller.
1910   bool EHPadForCallUnwindsLocally = false;
1911   if (CallSiteEHPad && isa<CallInst>(CB)) {
1912     UnwindDestMemoTy FuncletUnwindMap;
1913     Value *CallSiteUnwindDestToken =
1914         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1915 
1916     EHPadForCallUnwindsLocally =
1917         CallSiteUnwindDestToken &&
1918         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1919   }
1920 
1921   // Get an iterator to the last basic block in the function, which will have
1922   // the new function inlined after it.
1923   Function::iterator LastBlock = --Caller->end();
1924 
1925   // Make sure to capture all of the return instructions from the cloned
1926   // function.
1927   SmallVector<ReturnInst*, 8> Returns;
1928   ClonedCodeInfo InlinedFunctionInfo;
1929   Function::iterator FirstNewBlock;
1930 
1931   { // Scope to destroy VMap after cloning.
1932     ValueToValueMapTy VMap;
1933     struct ByValInit {
1934       Value *Dst;
1935       Value *Src;
1936       Type *Ty;
1937     };
1938     // Keep a list of pair (dst, src) to emit byval initializations.
1939     // Keep a list of (dst, src, type) triples to emit byval initializations.
1940 
1941     // When inlining a function that contains noalias scope metadata,
1942     // this metadata needs to be cloned so that the inlined blocks
1943     // have different "unique scopes" at every call site.
1944     // Track the metadata that must be cloned. Do this before other changes to
1945     // the function, so that we do not get in trouble when inlining caller ==
1946     // callee.
1947     ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
1948 
1949     auto &DL = Caller->getParent()->getDataLayout();
1950 
1951     // Calculate the vector of arguments to pass into the function cloner, which
1952     // matches up the formal to the actual argument values.
1953     auto AI = CB.arg_begin();
1954     unsigned ArgNo = 0;
1955     for (Function::arg_iterator I = CalledFunc->arg_begin(),
1956          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1957       Value *ActualArg = *AI;
1958 
1959       // When byval arguments are actually inlined, we need to make the copy
1960       // they imply explicit.  However, we don't do this if the callee is readonly
1961       // or readnone, because the copy would be unneeded: the callee doesn't
1962       // modify the struct.
1963       if (CB.isByValArgument(ArgNo)) {
1964         ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
1965                                         &CB, CalledFunc, IFI,
1966                                         CalledFunc->getParamAlignment(ArgNo));
1967         if (ActualArg != *AI)
1968           ByValInits.push_back(
1969               {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
1970       }
1971 
1972       VMap[&*I] = ActualArg;
1973     }
1974 
1975     // TODO: Remove this when users have been updated to the assume bundles.
1976     // Add alignment assumptions if necessary. We do this before the inlined
1977     // instructions are actually cloned into the caller so that we can easily
1978     // check what will be known at the start of the inlined code.
1979     AddAlignmentAssumptions(CB, IFI);
1980 
1981     AssumptionCache *AC =
1982         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1983 
1984     // Preserve all attributes of the call and its parameters.
1985     salvageKnowledge(&CB, AC);
1986 
1987     // We want the inliner to prune the code as it copies.  We would LOVE to
1988     // have no dead or constant instructions leftover after inlining occurs
1989     // (which can happen, e.g., because an argument was constant), but we'll be
1990     // happy with whatever the cloner can do.
1991     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1992                               /*ModuleLevelChanges=*/false, Returns, ".i",
1993                               &InlinedFunctionInfo);
1994     // Remember the first block that is newly cloned over.
1995     FirstNewBlock = LastBlock; ++FirstNewBlock;
1996 
1997     // Insert retainRV/claimRV runtime calls.
1998     objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
1999     if (RVCallKind != objcarc::ARCInstKind::None)
2000       inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
2001 
2002     // Update caller/callee profiles only when requested. For sample loader
2003     // inlining, the context-sensitive inlinee profile doesn't need to be
2004     // subtracted from callee profile, and the inlined clone also doesn't need
2005     // to be scaled based on call site count.
2006     if (IFI.UpdateProfile) {
2007       if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
2008         // Update the BFI of blocks cloned into the caller.
2009         updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
2010                         CalledFunc->front());
2011 
2012       if (auto Profile = CalledFunc->getEntryCount())
2013         updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
2014                           IFI.CallerBFI);
2015     }
2016 
2017     // Inject byval arguments initialization.
2018     for (ByValInit &Init : ByValInits)
2019       HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
2020                               &*FirstNewBlock, IFI);
2021 
2022     Optional<OperandBundleUse> ParentDeopt =
2023         CB.getOperandBundle(LLVMContext::OB_deopt);
2024     if (ParentDeopt) {
2025       SmallVector<OperandBundleDef, 2> OpDefs;
2026 
2027       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
2028         CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2029         if (!ICS)
2030           continue; // instruction was DCE'd or RAUW'ed to undef
2031 
2032         OpDefs.clear();
2033 
2034         OpDefs.reserve(ICS->getNumOperandBundles());
2035 
2036         for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
2037              ++COBi) {
2038           auto ChildOB = ICS->getOperandBundleAt(COBi);
2039           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
2040             // If the inlined call has other operand bundles, let them be
2041             OpDefs.emplace_back(ChildOB);
2042             continue;
2043           }
2044 
2045           // It may be useful to separate this logic (of handling operand
2046           // bundles) out to a separate "policy" component if this gets crowded.
2047           // Prepend the parent's deoptimization continuation to the newly
2048           // inlined call's deoptimization continuation.
2049           std::vector<Value *> MergedDeoptArgs;
2050           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2051                                   ChildOB.Inputs.size());
2052 
2053           llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2054           llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2055 
2056           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2057         }
2058 
2059         Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);
2060 
2061         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2062         // this even if the call returns void.
2063         ICS->replaceAllUsesWith(NewI);
2064 
2065         VH = nullptr;
2066         ICS->eraseFromParent();
2067       }
2068     }
2069 
2070     // Update the callgraph if requested.
2071     if (IFI.CG)
2072       UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);
2073 
2074     // For 'nodebug' functions, the associated DISubprogram is always null.
2075     // Conservatively avoid propagating the callsite debug location to
2076     // instructions inlined from a function whose DISubprogram is not null.
2077     fixupLineNumbers(Caller, FirstNewBlock, &CB,
2078                      CalledFunc->getSubprogram() != nullptr);
2079 
2080     // Now clone the inlined noalias scope metadata.
2081     SAMetadataCloner.clone();
2082     SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2083 
2084     // Add noalias metadata if necessary.
2085     AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);
2086 
2087     // Clone return attributes on the callsite into the calls within the inlined
2088     // function which feed into its return value.
2089     AddReturnAttributes(CB, VMap);
2090 
2091     // Propagate metadata on the callsite if necessary.
2092     PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2093 
2094     // Register any cloned assumptions.
2095     if (IFI.GetAssumptionCache)
2096       for (BasicBlock &NewBlock :
2097            make_range(FirstNewBlock->getIterator(), Caller->end()))
2098         for (Instruction &I : NewBlock)
2099           if (auto *II = dyn_cast<AssumeInst>(&I))
2100             IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2101   }
2102 
2103   // If there are any alloca instructions in the block that used to be the entry
2104   // block for the callee, move them to the entry block of the caller.  First
2105   // calculate which instruction they should be inserted before.  We insert the
2106   // instructions at the end of the current alloca list.
2107   {
2108     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2109     for (BasicBlock::iterator I = FirstNewBlock->begin(),
2110          E = FirstNewBlock->end(); I != E; ) {
2111       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2112       if (!AI) continue;
2113 
2114       // If the alloca is now dead, remove it.  This often occurs due to code
2115       // specialization.
2116       if (AI->use_empty()) {
2117         AI->eraseFromParent();
2118         continue;
2119       }
2120 
2121       if (!allocaWouldBeStaticInEntry(AI))
2122         continue;
2123 
2124       // Keep track of the static allocas that we inline into the caller.
2125       IFI.StaticAllocas.push_back(AI);
2126 
2127       // Scan for the block of allocas that we can move over, and move them
2128       // all at once.
2129       while (isa<AllocaInst>(I) &&
2130              !cast<AllocaInst>(I)->use_empty() &&
2131              allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2132         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2133         ++I;
2134       }
2135 
2136       // Transfer all of the allocas over in a block.  Using splice means
2137       // that the instructions aren't removed from the symbol table, then
2138       // reinserted.
2139       Caller->getEntryBlock().getInstList().splice(
2140           InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
2141     }
2142   }
2143 
2144   SmallVector<Value*,4> VarArgsToForward;
2145   SmallVector<AttributeSet, 4> VarArgsAttrs;
2146   for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2147        i < CB.arg_size(); i++) {
2148     VarArgsToForward.push_back(CB.getArgOperand(i));
2149     VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
2150   }
2151 
2152   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2153   if (InlinedFunctionInfo.ContainsCalls) {
2154     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2155     if (CallInst *CI = dyn_cast<CallInst>(&CB))
2156       CallSiteTailKind = CI->getTailCallKind();
2157 
2158     // For inlining purposes, the "notail" marker is the same as no marker.
2159     if (CallSiteTailKind == CallInst::TCK_NoTail)
2160       CallSiteTailKind = CallInst::TCK_None;
2161 
2162     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2163          ++BB) {
2164       for (Instruction &I : llvm::make_early_inc_range(*BB)) {
2165         CallInst *CI = dyn_cast<CallInst>(&I);
2166         if (!CI)
2167           continue;
2168 
2169         // Forward varargs from inlined call site to calls to the
2170         // ForwardVarArgsTo function, if requested, and to musttail calls.
2171         if (!VarArgsToForward.empty() &&
2172             ((ForwardVarArgsTo &&
2173               CI->getCalledFunction() == ForwardVarArgsTo) ||
2174              CI->isMustTailCall())) {
2175           // Collect attributes for non-vararg parameters.
2176           AttributeList Attrs = CI->getAttributes();
2177           SmallVector<AttributeSet, 8> ArgAttrs;
2178           if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2179             for (unsigned ArgNo = 0;
2180                  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2181               ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
2182           }
2183 
2184           // Add VarArg attributes.
2185           ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2186           Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
2187                                      Attrs.getRetAttrs(), ArgAttrs);
2188           // Add VarArgs to existing parameters.
2189           SmallVector<Value *, 6> Params(CI->args());
2190           Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2191           CallInst *NewCI = CallInst::Create(
2192               CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2193           NewCI->setDebugLoc(CI->getDebugLoc());
2194           NewCI->setAttributes(Attrs);
2195           NewCI->setCallingConv(CI->getCallingConv());
2196           CI->replaceAllUsesWith(NewCI);
2197           CI->eraseFromParent();
2198           CI = NewCI;
2199         }
2200 
2201         if (Function *F = CI->getCalledFunction())
2202           InlinedDeoptimizeCalls |=
2203               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2204 
2205         // We need to reduce the strength of any inlined tail calls.  For
2206         // musttail, we have to avoid introducing potential unbounded stack
2207         // growth.  For example, if functions 'f' and 'g' are mutually recursive
2208         // with musttail, we can inline 'g' into 'f' so long as we preserve
2209         // musttail on the cloned call to 'f'.  If either the inlined call site
2210         // or the cloned call site is *not* musttail, the program already has
2211         // one frame of stack growth, so it's safe to remove musttail.  Here is
2212         // a table of example transformations:
2213         //
2214         //    f -> musttail g -> musttail f  ==>  f -> musttail f
2215         //    f -> musttail g ->     tail f  ==>  f ->     tail f
2216         //    f ->          g -> musttail f  ==>  f ->          f
2217         //    f ->          g ->     tail f  ==>  f ->          f
2218         //
2219         // Inlined notail calls should remain notail calls.
2220         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2221         if (ChildTCK != CallInst::TCK_NoTail)
2222           ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2223         CI->setTailCallKind(ChildTCK);
2224         InlinedMustTailCalls |= CI->isMustTailCall();
2225 
2226         // Call sites inlined through a 'nounwind' call site should be
2227         // 'nounwind' as well. However, avoid marking call sites explicitly
2228         // where possible. This helps expose more opportunities for CSE after
2229         // inlining, commonly when the callee is an intrinsic.
2230         if (MarkNoUnwind && !CI->doesNotThrow())
2231           CI->setDoesNotThrow();
2232       }
2233     }
2234   }
2235 
2236   // Leave lifetime markers for the static allocas, scoping them to the
2237   // function we just inlined.
2238   // We need to insert lifetime intrinsics even at O0 to avoid invalid
2239   // access caused by multithreaded coroutines. The check
2240   // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
2241   if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2242       !IFI.StaticAllocas.empty()) {
2243     IRBuilder<> builder(&FirstNewBlock->front());
2244     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2245       AllocaInst *AI = IFI.StaticAllocas[ai];
2246       // Don't mark swifterror allocas. They can't have bitcast uses.
2247       if (AI->isSwiftError())
2248         continue;
2249 
2250       // If the alloca is already scoped to something smaller than the whole
2251       // function then there's no need to add redundant, less accurate markers.
2252       if (hasLifetimeMarkers(AI))
2253         continue;
2254 
2255       // Try to determine the size of the allocation.
2256       ConstantInt *AllocaSize = nullptr;
2257       if (ConstantInt *AIArraySize =
2258           dyn_cast<ConstantInt>(AI->getArraySize())) {
2259         auto &DL = Caller->getParent()->getDataLayout();
2260         Type *AllocaType = AI->getAllocatedType();
2261         TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2262         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2263 
2264         // Don't add markers for zero-sized allocas.
2265         if (AllocaArraySize == 0)
2266           continue;
2267 
2268         // Check that array size doesn't saturate uint64_t and doesn't
2269         // overflow when it's multiplied by type size.
2270         if (!AllocaTypeSize.isScalable() &&
2271             AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2272             std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2273                 AllocaTypeSize.getFixedSize()) {
2274           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2275                                         AllocaArraySize * AllocaTypeSize);
2276         }
2277       }
2278 
2279       builder.CreateLifetimeStart(AI, AllocaSize);
2280       for (ReturnInst *RI : Returns) {
2281         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2282         // call and a return.  The return kills all local allocas.
2283         if (InlinedMustTailCalls &&
2284             RI->getParent()->getTerminatingMustTailCall())
2285           continue;
2286         if (InlinedDeoptimizeCalls &&
2287             RI->getParent()->getTerminatingDeoptimizeCall())
2288           continue;
2289         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2290       }
2291     }
2292   }
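
       // A sketch of the result for a constant-size static alloca, assuming the
       // i8* form of the intrinsics (sizes are illustrative):
       //   call void @llvm.lifetime.start.p0i8(i64 24, i8* %a.i8)
       //   ...inlined code...
       //   call void @llvm.lifetime.end.p0i8(i64 24, i8* %a.i8)
       // with i64 -1 standing in for the size when AllocaSize is null.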
2293 
2294   // If the inlined code contained dynamic alloca instructions, wrap the inlined
2295   // code with llvm.stacksave/llvm.stackrestore intrinsics.
2296   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2297     Module *M = Caller->getParent();
2298     // Get the two intrinsics we care about.
2299     Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2300     Function *StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
2301 
2302     // Insert the llvm.stacksave.
2303     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2304                              .CreateCall(StackSave, {}, "savedstack");
2305 
2306     // Insert a call to llvm.stackrestore before any return instructions in the
2307     // inlined function.
2308     for (ReturnInst *RI : Returns) {
2309       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2310       // call and a return.  The return will restore the stack pointer.
2311       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2312         continue;
2313       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2314         continue;
2315       IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2316     }
2317   }
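
       // A sketch of the wrapping (names are illustrative): the inlined entry
       // block gains
       //   %savedstack = call i8* @llvm.stacksave()
       // and each surviving return point gains
       //   call void @llvm.stackrestore(i8* %savedstack)
       // so stack allocated dynamically by the callee is released when the
       // inlined code finishes.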
2318 
2319   // If we are inlining for an invoke instruction, we must make sure to rewrite
2320   // any call instructions into invoke instructions.  This is sensitive to which
2321   // funclet pads were top-level in the inlinee, so must be done before
2322   // rewriting the "parent pad" links.
2323   if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2324     BasicBlock *UnwindDest = II->getUnwindDest();
2325     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2326     if (isa<LandingPadInst>(FirstNonPHI)) {
2327       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2328     } else {
2329       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2330     }
2331   }
2332 
2333   // Update the lexical scopes of the new funclets and callsites.
2334   // Anything that had 'none' as its parent is now nested inside the callsite's
2335   // EHPad.
2336   if (CallSiteEHPad) {
2337     for (Function::iterator BB = FirstNewBlock->getIterator(),
2338                             E = Caller->end();
2339          BB != E; ++BB) {
2340       // Add bundle operands to inlined call sites.
2341       PropagateOperandBundles(BB, CallSiteEHPad);
2342 
2343       // It is problematic if the inlinee has a cleanupret which unwinds to
2344       // caller and we inline it into a call site which doesn't unwind but into
2345       // an EH pad that does.  Such an edge must be dynamically unreachable.
2346       // As such, we replace the cleanupret with unreachable.
2347       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2348         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2349           changeToUnreachable(CleanupRet);
2350 
2351       Instruction *I = BB->getFirstNonPHI();
2352       if (!I->isEHPad())
2353         continue;
2354 
2355       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2356         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2357           CatchSwitch->setParentPad(CallSiteEHPad);
2358       } else {
2359         auto *FPI = cast<FuncletPadInst>(I);
2360         if (isa<ConstantTokenNone>(FPI->getParentPad()))
2361           FPI->setParentPad(CallSiteEHPad);
2362       }
2363     }
2364   }
2365 
2366   if (InlinedDeoptimizeCalls) {
2367     // We need to at least remove the deoptimizing returns from the Return set,
2368     // so that the control flow from those returns does not get merged into the
2369     // caller (but terminate it instead).  If the caller's return type does not
2370     // match the callee's return type, we also need to change the return type of
2371     // the intrinsic.
2372     if (Caller->getReturnType() == CB.getType()) {
2373       llvm::erase_if(Returns, [](ReturnInst *RI) {
2374         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2375       });
2376     } else {
2377       SmallVector<ReturnInst *, 8> NormalReturns;
2378       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2379           Caller->getParent(), Intrinsic::experimental_deoptimize,
2380           {Caller->getReturnType()});
2381 
2382       for (ReturnInst *RI : Returns) {
2383         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2384         if (!DeoptCall) {
2385           NormalReturns.push_back(RI);
2386           continue;
2387         }
2388 
2389         // The calling convention on the deoptimize call itself may be bogus,
2390         // since the code we're inlining may have undefined behavior (and may
2391         // never actually execute at runtime); but all
2392         // @llvm.experimental.deoptimize declarations have to have the same
2393         // calling convention in a well-formed module.
2394         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2395         NewDeoptIntrinsic->setCallingConv(CallingConv);
2396         auto *CurBB = RI->getParent();
2397         RI->eraseFromParent();
2398 
2399         SmallVector<Value *, 4> CallArgs(DeoptCall->args());
2400 
2401         SmallVector<OperandBundleDef, 1> OpBundles;
2402         DeoptCall->getOperandBundlesAsDefs(OpBundles);
2403         auto DeoptAttributes = DeoptCall->getAttributes();
2404         DeoptCall->eraseFromParent();
2405         assert(!OpBundles.empty() &&
2406                "Expected at least the deopt operand bundle");
2407 
2408         IRBuilder<> Builder(CurBB);
2409         CallInst *NewDeoptCall =
2410             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2411         NewDeoptCall->setCallingConv(CallingConv);
2412         NewDeoptCall->setAttributes(DeoptAttributes);
2413         if (NewDeoptCall->getType()->isVoidTy())
2414           Builder.CreateRetVoid();
2415         else
2416           Builder.CreateRet(NewDeoptCall);
2417       }
2418 
2419       // Leave behind the normal returns so we can merge control flow.
2420       std::swap(Returns, NormalReturns);
2421     }
2422   }
2423 
2424   // Handle any inlined musttail call sites.  In order for a new call site to be
2425   // musttail, the source of the clone and the inlined call site must have been
2426   // musttail.  Therefore it's safe to return without merging control into the
2427   // phi below.
2428   if (InlinedMustTailCalls) {
2429     // Check if we need to bitcast the result of any musttail calls.
2430     Type *NewRetTy = Caller->getReturnType();
2431     bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2432 
2433     // Handle the returns preceded by musttail calls separately.
2434     SmallVector<ReturnInst *, 8> NormalReturns;
2435     for (ReturnInst *RI : Returns) {
2436       CallInst *ReturnedMustTail =
2437           RI->getParent()->getTerminatingMustTailCall();
2438       if (!ReturnedMustTail) {
2439         NormalReturns.push_back(RI);
2440         continue;
2441       }
2442       if (!NeedBitCast)
2443         continue;
2444 
2445       // Delete the old return and any preceding bitcast.
2446       BasicBlock *CurBB = RI->getParent();
2447       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2448       RI->eraseFromParent();
2449       if (OldCast)
2450         OldCast->eraseFromParent();
2451 
2452       // Insert a new bitcast and return with the right type.
2453       IRBuilder<> Builder(CurBB);
2454       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2455     }
2456 
2457     // Leave behind the normal returns so we can merge control flow.
2458     std::swap(Returns, NormalReturns);
2459   }
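  // An illustrative sketch of the bitcast rewrite above (hypothetical IR;
  // @f, %v, and %c are invented names): inlining a callee that returns i8*
  // into a caller that returns i32* turns
  //
  //   %v = musttail call i8* @f()
  //   ret i8* %v
  //
  // into
  //
  //   %v = musttail call i8* @f()
  //   %c = bitcast i8* %v to i32*
  //   ret i32* %c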
2460 
2461   // Now that all of the transforms on the inlined code have taken place but
2462   // before we splice the inlined code into the CFG and lose track of which
2463   // blocks were actually inlined, collect the call sites. We only do this if
2464   // call graph updates weren't requested, as those provide value handle based
2465   // tracking of inlined call sites instead. Calls to intrinsics are not
2466   // collected because they are not inlineable.
2467   if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2468     // Just collect the raw call sites that were inlined.
2469     for (BasicBlock &NewBB :
2470          make_range(FirstNewBlock->getIterator(), Caller->end()))
2471       for (Instruction &I : NewBB)
2472         if (auto *CB = dyn_cast<CallBase>(&I))
2473           if (!(CB->getCalledFunction() &&
2474                 CB->getCalledFunction()->isIntrinsic()))
2475             IFI.InlinedCallSites.push_back(CB);
2476   }
2477 
2478   // If we cloned in _exactly one_ basic block, and if that block ends in a
2479   // return instruction, we splice the body of the inlined callee directly into
2480   // the calling basic block.
2481   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2482     // Move all of the instructions right before the call.
2483     OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
2484                                  FirstNewBlock->begin(), FirstNewBlock->end());
2485     // Remove the cloned basic block.
2486     Caller->getBasicBlockList().pop_back();
2487 
2488     // If the call site was an invoke instruction, add a branch to the normal
2489     // destination.
2490     if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2491       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2492       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2493     }
2494 
2495     // If the return instruction returned a value, replace uses of the call with
2496     // uses of the returned value.
2497     if (!CB.use_empty()) {
2498       ReturnInst *R = Returns[0];
2499       if (&CB == R->getReturnValue())
2500         CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2501       else
2502         CB.replaceAllUsesWith(R->getReturnValue());
2503     }
2504     // Since we are now done with the Call/Invoke, we can delete it.
2505     CB.eraseFromParent();
2506 
2507     // Since we are now done with the return instruction, delete it also.
2508     Returns[0]->eraseFromParent();
2509 
2510     // We are now done with the inlining.
2511     return InlineResult::success();
2512   }
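  // For exposition, a hedged sketch of the fast path above (hypothetical
  // IR): inlining a single-block callee at "%r = call i32 @callee(i32 %x)"
  // splices the callee's instructions directly in front of the call,
  // rewires users of %r to the callee's return value, and deletes both the
  // call and the cloned block, leaving the caller's CFG shape unchanged.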
2513 
2514   // Otherwise, we have the normal case of more than one block to inline or
2515   // multiple return sites.
2516 
2517   // We want to clone the entire callee function into the hole between the
2518   // "starter" and "ender" blocks.  How we accomplish this depends on whether
2519   // this is an invoke instruction or a call instruction.
2520   BasicBlock *AfterCallBB;
2521   BranchInst *CreatedBranchToNormalDest = nullptr;
2522   if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2523 
2524     // Add an unconditional branch to make this look like the CallInst case...
2525     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2526 
2527     // Split the basic block.  This guarantees that no PHI nodes will have to
2528     // be updated due to new incoming edges, and makes the invoke case more
2529     // symmetric to the call case.
2530     AfterCallBB =
2531         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2532                                 CalledFunc->getName() + ".exit");
2533 
2534   } else { // It's a call
2535     // If this is a call instruction, we need to split the basic block that
2536     // the call lives in.
2537     //
2538     AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2539                                           CalledFunc->getName() + ".exit");
2540   }
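  // A small sketch of the split above, assuming a caller block %entry and
  // a callee @callee (hypothetical names): the block containing
  //
  //   %r = call i32 @callee()
  //
  // is split at the call (or at the branch to the invoke's normal
  // destination), so OrigBB now ends in an unconditional branch to a new
  // "callee.exit" block holding everything after the call.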
2541 
2542   if (IFI.CallerBFI) {
2543     // Copy original BB's block frequency to AfterCallBB
2544     IFI.CallerBFI->setBlockFreq(
2545         AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2546   }
2547 
2548   // Change the branch that used to go to AfterCallBB to branch to the first
2549   // basic block of the inlined function.
2550   //
2551   Instruction *Br = OrigBB->getTerminator();
2552   assert(Br && Br->getOpcode() == Instruction::Br &&
2553          "splitBasicBlock broken!");
2554   Br->setOperand(0, &*FirstNewBlock);
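  // After this retargeting, the unconditional branch that splitBasicBlock
  // created in OrigBB jumps to the inlined entry block instead of to
  // AfterCallBB, e.g. (hypothetical label):
  //
  //   entry:                ; OrigBB
  //     br label %inlined.entry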
2555 
2556   // Now that the function is correct, make it a little bit nicer.  In
2557   // particular, move the basic blocks inserted from the end of the function
2558   // into the space made by splitting the source basic block.
2559   Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2560                                      Caller->getBasicBlockList(), FirstNewBlock,
2561                                      Caller->end());
2562 
2563   // Handle all of the return instructions that we just cloned in, and eliminate
2564   // any users of the original call/invoke instruction.
2565   Type *RTy = CalledFunc->getReturnType();
2566 
2567   PHINode *PHI = nullptr;
2568   if (Returns.size() > 1) {
2569     // The PHI node should go at the front of the new basic block to merge all
2570     // possible incoming values.
2571     if (!CB.use_empty()) {
2572       PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
2573                             &AfterCallBB->front());
2574       // Anything that used the result of the function call should now use the
2575       // PHI node as their operand.
2576       CB.replaceAllUsesWith(PHI);
2577     }
2578 
2579     // Loop over all of the return instructions adding entries to the PHI node
2580     // as appropriate.
2581     if (PHI) {
2582       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2583         ReturnInst *RI = Returns[i];
2584         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2585                "Ret value not consistent in function!");
2586         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2587       }
2588     }
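    // The merged result looks like the following sketch (hypothetical IR,
    // invented names), with one entry per cloned return block:
    //
    //   %phi = phi i32 [ %a, %ret.bb1 ], [ %b, %ret.bb2 ]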
2589 
2590     // Add a branch to the merge points and remove return instructions.
2591     DebugLoc Loc;
2592     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2593       ReturnInst *RI = Returns[i];
2594       BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2595       Loc = RI->getDebugLoc();
2596       BI->setDebugLoc(Loc);
2597       RI->eraseFromParent();
2598     }
2599     // We need to set the debug location to *somewhere* inside the
2600     // inlined function. The line number may be nonsensical, but the
2601     // instruction will at least be associated with the right
2602     // function.
2603     if (CreatedBranchToNormalDest)
2604       CreatedBranchToNormalDest->setDebugLoc(Loc);
2605   } else if (!Returns.empty()) {
2606     // Otherwise, if there is exactly one return value, just replace anything
2607     // using the return value of the call with the computed value.
2608     if (!CB.use_empty()) {
2609       if (&CB == Returns[0]->getReturnValue())
2610         CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2611       else
2612         CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2613     }
2614 
2615     // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2616     BasicBlock *ReturnBB = Returns[0]->getParent();
2617     ReturnBB->replaceAllUsesWith(AfterCallBB);
2618 
2619     // Splice the code from the return block into the block that it will return
2620     // to, which contains the code that was after the call.
2621     AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2622                                       ReturnBB->getInstList());
2623 
2624     if (CreatedBranchToNormalDest)
2625       CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2626 
2627     // Delete the return instruction and the now-empty ReturnBB.
2628     Returns[0]->eraseFromParent();
2629     ReturnBB->eraseFromParent();
2630   } else if (!CB.use_empty()) {
2631     // No returns, but something is using the return value of the call.  Just
2632     // nuke the result.
2633     CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
2634   }
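  // A sketch of the no-return case above (hypothetical IR): for a callee
  // such as
  //
  //   define i32 @callee() { unreachable }
  //
  // every use of the original call's result is dynamically unreachable, so
  // replacing it with poison is safe.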
2635 
2636   // Since we are now done with the Call/Invoke, we can delete it.
2637   CB.eraseFromParent();
2638 
2639   // If we inlined any musttail calls and the original return is now
2640   // unreachable, delete it.  It can only contain a bitcast and ret.
2641   if (InlinedMustTailCalls && pred_empty(AfterCallBB))
2642     AfterCallBB->eraseFromParent();
2643 
2644   // We should always be able to fold the entry block of the function into the
2645   // single predecessor of the block...
2646   assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2647   BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2648 
2649   // Splice the code entry block into calling block, right before the
2650   // unconditional branch.
2651   CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
2652   OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2653 
2654   // Remove the unconditional branch.
2655   OrigBB->getInstList().erase(Br);
2656 
2657   // Now we can remove the CalleeEntry block, which is now empty.
2658   Caller->getBasicBlockList().erase(CalleeEntry);
2659 
2660   // If we inserted a phi node, check to see if it has a single value (e.g. all
2661   // the entries are the same or undef).  If so, remove the PHI so it doesn't
2662   // block other optimizations.
2663   if (PHI) {
2664     AssumptionCache *AC =
2665         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2666     auto &DL = Caller->getParent()->getDataLayout();
2667     if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2668       PHI->replaceAllUsesWith(V);
2669       PHI->eraseFromParent();
2670     }
2671   }
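  // For example (hypothetical IR), a merge PHI whose incoming values all
  // agree,
  //
  //   %phi = phi i32 [ %v, %bb1 ], [ %v, %bb2 ]
  //
  // simplifies to %v, so the PHI is replaced and erased rather than left
  // to block later optimizations.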
2672 
2673   return InlineResult::success();
2674 }
2675