//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph.  The decisions about which
// calls are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InlineOrder.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ReplayInlineAdvisor.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted,
          "Number of functions deleted because all callers were found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed. To experiment
/// with disabling it and relying fully on lifetime marker based stack
/// coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

/// A flag for testing, so we can print the content of the advisor when running
/// it as part of the default (e.g. -O3) pipeline.
static cl::opt<bool> KeepAdvisorForPrinting("keep-inline-advisor-for-printing",
                                            cl::init(false), cl::Hidden);

extern cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats;

static cl::opt<std::string> CGSCCInlineReplayFile(
    "cgscc-inline-replay", cl::init(""), cl::value_desc("filename"),
    cl::desc(
        "Optimization remarks file containing inline remarks to be replayed "
        "by cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlinerSettings::Scope> CGSCCInlineReplayScope(
    "cgscc-inline-replay-scope",
    cl::init(ReplayInlinerSettings::Scope::Function),
    cl::values(clEnumValN(ReplayInlinerSettings::Scope::Function, "Function",
                          "Replay on functions that have remarks associated "
                          "with them (default)"),
               clEnumValN(ReplayInlinerSettings::Scope::Module, "Module",
                          "Replay on the entire module")),
    cl::desc("Whether inline replay should be applied to the entire "
             "Module or just the Functions (default) that are present as "
             "callers in remarks during cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlinerSettings::Fallback> CGSCCInlineReplayFallback(
    "cgscc-inline-replay-fallback",
    cl::init(ReplayInlinerSettings::Fallback::Original),
    cl::values(
        clEnumValN(
            ReplayInlinerSettings::Fallback::Original, "Original",
            "All decisions not in replay are sent to the original advisor "
            "(default)"),
        clEnumValN(ReplayInlinerSettings::Fallback::AlwaysInline,
                   "AlwaysInline", "All decisions not in replay are inlined"),
        clEnumValN(ReplayInlinerSettings::Fallback::NeverInline, "NeverInline",
                   "All decisions not in replay are not inlined")),
    cl::desc(
        "How cgscc inline replay treats sites that don't come from the replay. "
        "Original: defers to original advisor, AlwaysInline: inline all sites "
        "not in replay, NeverInline: inline no sites not in replay"),
    cl::Hidden);

static cl::opt<CallSiteFormat::Format> CGSCCInlineReplayFormat(
    "cgscc-inline-replay-format",
    cl::init(CallSiteFormat::Format::LineColumnDiscriminator),
    cl::values(
        clEnumValN(CallSiteFormat::Format::Line, "Line", "<Line Number>"),
        clEnumValN(CallSiteFormat::Format::LineColumn, "LineColumn",
                   "<Line Number>:<Column Number>"),
        clEnumValN(CallSiteFormat::Format::LineDiscriminator,
                   "LineDiscriminator", "<Line Number>.<Discriminator>"),
        clEnumValN(CallSiteFormat::Format::LineColumnDiscriminator,
                   "LineColumnDiscriminator",
                   "<Line Number>:<Column Number>.<Discriminator> (default)")),
    cl::desc("How the cgscc inline replay file is formatted"), cl::Hidden);
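
// For illustration only (the input file name here is hypothetical): a replay
// session typically captures inline remarks from a previous compile (e.g. via
// -Rpass=inline or -pass-remarks-output) and feeds them back through the
// flags defined above, along the lines of
//   opt -passes='cgscc(inline)' -cgscc-inline-replay=inline_remarks.txt \
//       -cgscc-inline-replay-fallback=NeverInline in.bc -o out.bc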

static cl::opt<bool> InlineEnablePriorityOrder(
    "inline-enable-priority-order", cl::Hidden, cl::init(false),
    cl::desc("Enable the priority inline order for the inliner"));

LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should
/// always explicitly call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site.  If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs.  One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type.  These are usually not promoted because someone is using a
/// variable index into them.  These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all.  This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
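///
/// As a hypothetical IR sketch of the effect (names invented): after inlining
/// two calls that each brought in a [256 x i8] buffer,
///   %buf.a = alloca [256 x i8]   ; from the first inlined call
///   %buf.b = alloca [256 x i8]   ; from the second inlined call
/// the second alloca is RAUW'd to the first and then erased, so both
/// disjoint-lifetime buffers end up sharing a single stack slot.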
static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
                                     InlinedArrayAllocasTy &InlinedArrayAllocas,
                                     int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if the call site was inlined from
  // some other call site.  For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A.  Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining.  When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint.  We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca.  If not, remember that we had it.
  for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.  Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function.  Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      Align Align1 = AI->getAlign();
      Align Align2 = AvailableAlloca->getAlign();

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      LLVM_DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation where a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 > Align2)
        AvailableAlloca->setAlignment(AI->getAlign());

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR.  The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller.  If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult inlineCallIfPossible(
    CallBase &CB, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CB.getCalledFunction();
  Function *Caller = CB.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function.  Get the list of static allocas that were
  // inlined.
  InlineResult IR = InlineFunction(CB, IFI, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
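///
/// For example (function names purely illustrative): with
///   InlineHistory = { {B, -1}, {C, 0} }
/// history ID 1 denotes call sites exposed by inlining C at a site that was
/// itself exposed by inlining B, so inlineHistoryIncludes(B, 1, InlineHistory)
/// walks the chain 1 -> 0 and returns true.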
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallBase &CB)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallBase *, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;
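  // (inlineHistoryIncludes above walks this parent chain to detect a callee
  // that was already inlined on the way to a candidate call site.)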

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        auto *CB = dyn_cast<CallBase>(&I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CB || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (Function *Callee = CB->getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CB, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned I = 0; I < FirstCallInSCC; ++I)
    if (Function *F = CallSites[I].first->getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[I--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Keep iterating in the outer loop because inlining can cause indirect
    // calls to become direct calls.
    // CallSites may be modified inside the loop, so a range-based for loop
    // cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      auto &P = CallSites[CSi];
      CallBase &CB = *P.first;
      const int InlineHistoryID = P.second;

      Function *Caller = CB.getCaller();
      Function *Callee = CB.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));

      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        if (InlineHistoryID != -1 &&
            inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CB, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand.  With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      auto OIC = shouldInline(CB, GetInlineCost, ORE);
      // If the policy determines that we should not inline this function,
      // move on to the next call site.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << CB << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        setInlineRemark(CB, "trivially dead");
        CG[Caller]->removeCallEdgeFor(CB);
        CB.eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get the DebugLoc to report: CB will be invalid after inlining.
        DebugLoc DLoc = CB.getDebugLoc();
        BasicBlock *Block = CB.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = inlineCallIfPossible(
            CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emitInlinedIntoBasedOnCost(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

#ifndef NDEBUG
          // Make sure there are no duplicates in the inline candidates.  This
          // could happen when a callsite is simplified to reuse the return
          // value of another callsite during function cloning; the other
          // callsite will then be reconsidered here.
          DenseSet<CallBase *> DbgCallSites;
          for (auto &II : CallSites)
            DbgCallSites.insert(II.first);
#endif

          for (Value *Ptr : InlineInfo.InlinedCalls) {
#ifndef NDEBUG
            assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0);
#endif
            CallSites.push_back(
                std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}
612 
613 /// Remove dead functions that are not included in DNR (Do Not Remove) list.
614 bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
615                                             bool AlwaysInlineOnly) {
616   SmallVector<CallGraphNode *, 16> FunctionsToRemove;
617   SmallVector<Function *, 16> DeadFunctionsInComdats;
618 
619   auto RemoveCGN = [&](CallGraphNode *CGN) {
620     // Remove any call graph edges from the function to its callees.
621     CGN->removeAllCalledFunctions();
622 
623     // Remove any edges from the external node to the function's call graph
624     // node.  These edges might have been made irrelegant due to
625     // optimization of the program.
626     CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);
627 
628     // Removing the node for callee from the call graph and delete it.
629     FunctionsToRemove.push_back(CGN);
630   };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating in a non-stable order
  // here; it doesn't matter which order the functions are deleted in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlineAdvisor &
InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                        FunctionAnalysisManager &FAM, Module &M) {
  if (OwnedAdvisor)
    return *OwnedAdvisor;

  auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
  if (!IAA) {
    // It should still be possible to run the inliner as a stand-alone SCC pass,
    // for test scenarios. In that case, we default to the
    // DefaultInlineAdvisor, which doesn't need to keep state between SCC pass
    // runs. It also uses just the default InlineParams.
    // In this case, we need to use the provided FAM, which is valid for the
    // duration of the inliner pass, and thus the lifetime of the owned advisor.
    // The one we would get from the MAM can be invalidated as a result of the
    // inliner's activity.
    OwnedAdvisor =
        std::make_unique<DefaultInlineAdvisor>(M, FAM, getInlineParams());

    if (!CGSCCInlineReplayFile.empty())
      OwnedAdvisor = getReplayInlineAdvisor(
          M, FAM, M.getContext(), std::move(OwnedAdvisor),
          ReplayInlinerSettings{CGSCCInlineReplayFile,
                                CGSCCInlineReplayScope,
                                CGSCCInlineReplayFallback,
                                {CGSCCInlineReplayFormat}},
          /*EmitRemarks=*/true);

    return *OwnedAdvisor;
  }
  assert(IAA->getAdvisor() &&
         "Expected a present InlineAdvisorAnalysis to also have an "
         "InlineAdvisor initialized");
  return *IAA->getAdvisor();
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const auto &MAMProxy =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG);
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M);
  Advisor.onPassEntry();

  auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(&InitialC); });

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end. The PriorityInlineOrder is optional here; with it, smaller
  // callees are given higher priority for inlining.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they become
  // too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on this
  // bottom-up behavior. As a consequence, with heavily connected *SCCs* of
  // functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the SCC
  // and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a superlinear fashion.
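  //
  // As a concrete illustration (not a case drawn from real code): in an SCC
  // {A, B, C} where each function calls the next, deferring the transitive
  // edges means one round of inlining grows each function roughly once,
  // instead of repeatedly flattening fresh copies of the cycle into whichever
  // function happens to be visited first.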
  std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>> Calls;
  if (InlineEnablePriorityOrder)
    Calls = std::make_unique<PriorityInlineOrder<InlineSizePriority>>();
  else
    Calls = std::make_unique<DefaultInlineOrder<std::pair<CallBase *, int>>>();
  assert(Calls != nullptr && "Expected an initialized InlineOrder");

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned value
    // after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls->push({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls->empty())
    return PreservedAnalyses::all();

  // Capture updatable variable for the current SCC.
  auto *C = &InitialC;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones that
  // got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Track potentially dead non-local functions with comdats to see if they can
  // be deleted as a batch after inlining.
  SmallVector<Function *, 4> DeadFunctionsInComdats;

  // Loop forward over all of the calls.
  while (!Calls->empty()) {
    // We expect the calls to typically be batched with sequences of calls that
    // have the same caller, so we first set up some shared infrastructure for
    // this caller. We also do any pruning we can at this layer on the caller
    // alone.
    Function &F = *Calls->front().first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C) {
      Calls->pop();
      continue;
    }

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"
                      << "    Function size: " << F.getInstructionCount()
                      << "\n");

    auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };

    // Now process as many calls as we have within this caller in the sequence.
    // We bail out as soon as the caller has to change so we can update the
    // call graph and prepare the context of that new caller.
    bool DidInline = false;
    while (!Calls->empty() && Calls->front().first->getCaller() == &F) {
      auto P = Calls->pop();
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        LLVM_DEBUG(dbgs() << "Skipping inlining due to history: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive");
        continue;
      }

      // Check whether this inlining may once again break apart an SCC that
      // has already been split before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC iterations
      // and thus hidden from the full inline history.
      if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
          UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      std::unique_ptr<InlineAdvice> Advice =
          Advisor.getAdvice(*CB, OnlyMandatory);

      // Check whether we want to inline this callsite.
      if (!Advice)
        continue;

      if (!Advice->isInliningRecommended()) {
        Advice->recordUnattemptedInlining();
        continue;
      }

      // Set up the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      InlineResult IR =
          InlineFunction(*CB, IFI, &FAM.getResult<AAManager>(*CB->getCaller()));
      if (!IR.isSuccess()) {
        Advice->recordUnsuccessfulInlining(IR);
        continue;
      }

      DidInline = true;
      InlinedCallees.insert(&Callee);
      ++NumInlined;

      LLVM_DEBUG(dbgs() << "    Size after inlining: "
                        << F.getInstructionCount() << "\n");

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});

        for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = ICB->getCalledFunction();
          assert(!(NewCallee && NewCallee->isIntrinsic()) &&
                 "Intrinsic calls should not be tracked.");
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(*ICB))
              NewCallee = ICB->getCalledFunction();
          }
          if (NewCallee)
            if (!NewCallee->isDeclaration())
              Calls->push({ICB, NewHistoryID});
        }
      }

      // Merge the attributes based on the inlining.
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions or discardable functions without comdats, check
      // whether this makes the callee trivially dead. In that case, we can drop
      // the body of the function eagerly, which may reduce the number of
      // callers of other functions to one, changing inline cost thresholds.
      // Non-local discardable functions with comdats are checked later on.
      bool CalleeWasDeleted = false;
      if (Callee.isDiscardableIfUnused() && Callee.hasZeroLiveUses() &&
          !CG.isLibFunction(Callee)) {
        if (Callee.hasLocalLinkage() || !Callee.hasComdat()) {
          Calls->erase_if([&](const std::pair<CallBase *, int> &Call) {
            return Call.first->getCaller() == &Callee;
          });
          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(!is_contained(DeadFunctions, &Callee) &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
          CalleeWasDeleted = true;
        } else {
          DeadFunctionsInComdats.push_back(&Callee);
        }
      }
      if (CalleeWasDeleted)
        Advice->recordInliningWithCalleeDeleted();
      else
        Advice->recordInlining();
    }

    if (!DidInline)
      continue;
    Changed = true;

    // At this point, since we have made changes, we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.

    // Inside the update, we also update the FunctionAnalysisManager in the
    // proxy for this particular SCC. We do this as the SCC may have changed and
    // as we're going to mutate this particular function we want to make sure
    // the proxy is in place to forward any invalidation events.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForCGSCCPass(CG, *C, N, AM, UR, FAM);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the SCC
    // mutation, we will revisit this function and potentially re-inline. If we
    // do, and that re-inlining also has the potential to mutate the SCC
    // structure, the infinite inlining problem can manifest through infinite
    // SCC splits and merges. To avoid this, we capture the originating caller
    // node and the SCC containing the call edge. This is a slight
    // overapproximation of the possible inlining decisions that must be
    // avoided, but is relatively efficient to store. We use C != OldC to know
    // when a new SCC is generated and the original SCC may be generated via
    // merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged
    // back into the same one as itself, and the original SCC will be added
    // into UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history; we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();

    // Invalidate analyses for this function now so that we don't have to
    // invalidate analyses for all functions in this SCC later.
    FAM.invalidate(F, PreservedAnalyses::none());
  }

  // We must ensure that we only delete functions with comdats if every function
  // in the comdat is going to be deleted.
  if (!DeadFunctionsInComdats.empty()) {
    filterDeadComdatFunctions(DeadFunctionsInComdats);
    for (auto *Callee : DeadFunctionsInComdats)
      Callee->dropAllReferences();
    DeadFunctions.append(DeadFunctionsInComdats);
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the CGSCC
  // pass manager in the process.
  //
  // Note that the order in which these dead functions are visited doesn't
  // matter: all we do here is delete things and add pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // If the updated SCC was the one containing the deleted function, clear it.
    if (&DeadC == UR.UpdatedC)
      UR.UpdatedC = nullptr;

    // And delete the actual function from the module.
    M.getFunctionList().erase(DeadF);

    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  // We have already invalidated all analyses on modified functions.
  PA.preserveSet<AllAnalysesOn<Function>>();
  return PA;
}

ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
                                                   bool MandatoryFirst,
                                                   InliningAdvisorMode Mode,
                                                   unsigned MaxDevirtIterations)
    : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations) {
  // Run the inliner first. The theory is that we are walking bottom-up and so
  // the callees have already been fully optimized, and we want to inline them
  // into the callers so that our optimizations can reflect that.
  // For the PreLinkThinLTO pass, we disable the hot-caller heuristic for
  // sample PGO because it makes profile annotation in the backend inaccurate.
  if (MandatoryFirst)
    PM.addPass(InlinerPass(/*OnlyMandatory*/ true));
  PM.addPass(InlinerPass());
}

PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
                                                ModuleAnalysisManager &MAM) {
  auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
  if (!IAA.tryCreate(Params, Mode,
                     {CGSCCInlineReplayFile,
                      CGSCCInlineReplayScope,
                      CGSCCInlineReplayFallback,
                      {CGSCCInlineReplayFormat}})) {
    M.getContext().emitError(
        "Could not set up the Inlining Advisor for the requested "
        "mode and/or options");
    return PreservedAnalyses::all();
  }

  // We wrap the CGSCC pipeline in a devirtualization repeater. This will try
  // to detect when we devirtualize indirect calls and iterate the SCC passes
  // in that case to try to catch knock-on inlining or function attrs
  // opportunities. Then we add it to the module pipeline by walking the SCCs
  // in postorder (or bottom-up).
  // If MaxDevirtIterations is 0, we just don't use the devirtualization
  // wrapper.
  if (MaxDevirtIterations == 0)
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM)));
  else
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations)));

  MPM.addPass(std::move(AfterCGMPM));
  MPM.run(M, MAM);

  // Discard the InlineAdvisor; a subsequent inlining session should construct
  // its own.
  auto PA = PreservedAnalyses::all();
  if (!KeepAdvisorForPrinting)
    PA.abandon<InlineAdvisorAnalysis>();
  return PA;
}

void InlinerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<InlinerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  if (OnlyMandatory)
    OS << "<only-mandatory>";
}

void ModuleInlinerWrapperPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  // Print some info about passes added to the wrapper. This is, however,
  // incomplete, as the InlineAdvisorAnalysis part isn't included (it also
  // depends on Params and Mode).
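  //
  // As a rough illustration (exact printed pass names may differ): a wrapper
  // built with MandatoryFirst and MaxDevirtIterations == 4 would print
  // something like
  //   cgscc(devirt<4>(inline<only-mandatory>,inline))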
  if (!MPM.isEmpty()) {
    MPM.printPipeline(OS, MapClassName2PassName);
    OS << ",";
  }
  OS << "cgscc(";
  if (MaxDevirtIterations != 0)
    OS << "devirt<" << MaxDevirtIterations << ">(";
  PM.printPipeline(OS, MapClassName2PassName);
  if (MaxDevirtIterations != 0)
    OS << ")";
  OS << ")";
}
1162