//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph.  The decisions of which calls
// are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ReplayInlineAdvisor.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");

static cl::opt<int> IntraSCCCostMultiplier(
    "intra-scc-cost-multiplier", cl::init(2), cl::Hidden,
    cl::desc(
        "Cost multiplier to multiply onto inlined call sites where the "
        "new call was previously an intra-SCC call (not relevant when the "
        "original call was already intra-SCC). This can accumulate over "
        "multiple inlinings (e.g. if a call site already had a cost "
        "multiplier and one of its inlined calls was also subject to "
        "this, the inlined call would have the original multiplier "
        "multiplied by intra-scc-cost-multiplier). This is to prevent tons of "
        "inlining through a child SCC which can cause terrible compile times"));
/// A flag for testing, so we can print the content of the advisor when running
/// it as part of the default (e.g. -O3) pipeline.
static cl::opt<bool> KeepAdvisorForPrinting("keep-inline-advisor-for-printing",
                                            cl::init(false), cl::Hidden);

/// Allows printing the contents of the advisor after each SCC inliner pass.
static cl::opt<bool>
    EnablePostSCCAdvisorPrinting("enable-scc-inline-advisor-printing",
                                 cl::init(false), cl::Hidden);

static cl::opt<std::string> CGSCCInlineReplayFile(
    "cgscc-inline-replay", cl::init(""), cl::value_desc("filename"),
    cl::desc(
        "Optimization remarks file containing inline remarks to be replayed "
        "by cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlinerSettings::Scope> CGSCCInlineReplayScope(
    "cgscc-inline-replay-scope",
    cl::init(ReplayInlinerSettings::Scope::Function),
    cl::values(clEnumValN(ReplayInlinerSettings::Scope::Function, "Function",
                          "Replay on functions that have remarks associated "
                          "with them (default)"),
               clEnumValN(ReplayInlinerSettings::Scope::Module, "Module",
                          "Replay on the entire module")),
    cl::desc("Whether inline replay should be applied to the entire "
             "Module or just the Functions (default) that are present as "
             "callers in remarks during cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlinerSettings::Fallback> CGSCCInlineReplayFallback(
    "cgscc-inline-replay-fallback",
    cl::init(ReplayInlinerSettings::Fallback::Original),
    cl::values(
        clEnumValN(
            ReplayInlinerSettings::Fallback::Original, "Original",
            "All decisions not in replay are sent to the original advisor "
            "(default)"),
        clEnumValN(ReplayInlinerSettings::Fallback::AlwaysInline,
                   "AlwaysInline", "All decisions not in replay are inlined"),
        clEnumValN(ReplayInlinerSettings::Fallback::NeverInline, "NeverInline",
                   "All decisions not in replay are not inlined")),
    cl::desc(
        "How cgscc inline replay treats sites that don't come from the replay. "
        "Original: defers to original advisor, AlwaysInline: inline all sites "
        "not in replay, NeverInline: inline no sites not in replay"),
    cl::Hidden);

static cl::opt<CallSiteFormat::Format> CGSCCInlineReplayFormat(
    "cgscc-inline-replay-format",
    cl::init(CallSiteFormat::Format::LineColumnDiscriminator),
    cl::values(
        clEnumValN(CallSiteFormat::Format::Line, "Line", "<Line Number>"),
        clEnumValN(CallSiteFormat::Format::LineColumn, "LineColumn",
                   "<Line Number>:<Column Number>"),
        clEnumValN(CallSiteFormat::Format::LineDiscriminator,
                   "LineDiscriminator", "<Line Number>.<Discriminator>"),
        clEnumValN(CallSiteFormat::Format::LineColumnDiscriminator,
                   "LineColumnDiscriminator",
                   "<Line Number>:<Column Number>.<Discriminator> (default)")),
    cl::desc("How cgscc inline replay file is formatted"), cl::Hidden);

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

InlineAdvisor &
InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                        FunctionAnalysisManager &FAM, Module &M) {
  if (OwnedAdvisor)
    return *OwnedAdvisor;

  auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
  if (!IAA) {
    // It should still be possible to run the inliner as a stand-alone SCC pass,
    // for test scenarios. In that case, we default to the
    // DefaultInlineAdvisor, which doesn't need to keep state between SCC pass
    // runs. It also uses just the default InlineParams.
    // In this case, we need to use the provided FAM, which is valid for the
    // duration of the inliner pass, and thus the lifetime of the owned advisor.
    // The one we would get from the MAM can be invalidated as a result of the
    // inliner's activity.
    OwnedAdvisor = std::make_unique<DefaultInlineAdvisor>(
        M, FAM, getInlineParams(),
        InlineContext{LTOPhase, InlinePass::CGSCCInliner});

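    // If a replay file was provided, wrap the default advisor in a replay
    // advisor that re-applies the inlining decisions recorded in that file.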
    if (!CGSCCInlineReplayFile.empty())
      OwnedAdvisor = getReplayInlineAdvisor(
          M, FAM, M.getContext(), std::move(OwnedAdvisor),
          ReplayInlinerSettings{CGSCCInlineReplayFile,
                                CGSCCInlineReplayScope,
                                CGSCCInlineReplayFallback,
                                {CGSCCInlineReplayFormat}},
          /*EmitRemarks=*/true,
          InlineContext{LTOPhase, InlinePass::ReplayCGSCCInliner});

    return *OwnedAdvisor;
  }
  assert(IAA->getAdvisor() &&
         "Expected a present InlineAdvisorAnalysis to also have an "
         "InlineAdvisor initialized");
  return *IAA->getAdvisor();
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const auto &MAMProxy =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG);
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M);
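  // Notify the advisor that we are entering this SCC; the scope_exit below
  // guarantees the matching onPassExit notification when we leave.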
  Advisor.onPassEntry(&InitialC);

  auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(&InitialC); });

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end. The PriorityInlineOrder is optional here; when it is used, a
  // smaller callee is given a higher priority to inline.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they become
  // too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on this
  // bottom-up behavior. As a consequence, with heavily connected *SCCs* of
  // functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the SCC
  // and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a super linear fashion.
  SmallVector<std::pair<CallBase *, int>, 16> Calls;

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned value
    // after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls.push_back({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls.empty())
    return PreservedAnalyses::all();

  // Capture updatable variable for the current SCC.
  auto *C = &InitialC;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones that
  // got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Track potentially dead non-local functions with comdats to see if they can
  // be deleted as a batch after inlining.
  SmallVector<Function *, 4> DeadFunctionsInComdats;

  // Loop forward over all of the calls. Note that we cannot cache the size as
  // inlining can introduce new calls that need to be processed.
  for (int I = 0; I < (int)Calls.size(); ++I) {
    // We expect the calls to typically be batched with sequences of calls that
    // have the same caller, so we first set up some shared infrastructure for
    // this caller. We also do any pruning we can at this layer on the caller
    // alone.
    Function &F = *Calls[I].first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
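    // Skip this call if its caller has been moved out of the SCC we are
    // currently processing by an earlier call graph update.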
    if (CG.lookupSCC(N) != C)
      continue;

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"
                      << "    Function size: " << F.getInstructionCount()
                      << "\n");

    auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };

    // Now process as many calls as we have within this caller in the sequence.
    // We bail out as soon as the caller has to change so we can update the
    // call graph and prepare the context of that new caller.
    bool DidInline = false;
    for (; I < (int)Calls.size() && Calls[I].first->getCaller() == &F; ++I) {
      auto &P = Calls[I];
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        LLVM_DEBUG(dbgs() << "Skipping inlining due to history: " << F.getName()
                          << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive");
        // Set noinline so that we don't forget this decision across CGSCC
        // iterations.
        CB->setIsNoInline();
        continue;
      }

      // Check whether this inlining may once again break apart an SCC that
      // has already been split before. In that case, inlining here may
      // trigger infinite inlining, much as is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC iterations
      // and thus hidden from the full inline history.
      LazyCallGraph::SCC *CalleeSCC = CG.lookupSCC(*CG.lookup(Callee));
      if (CalleeSCC == C && UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      std::unique_ptr<InlineAdvice> Advice =
          Advisor.getAdvice(*CB, OnlyMandatory);

      // Check whether we want to inline this callsite.
      if (!Advice)
        continue;

      if (!Advice->isInliningRecommended()) {
        Advice->recordUnattemptedInlining();
        continue;
      }

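      // Read any cost multiplier previously attached to this call site by the
      // intra-SCC handling below; when the attribute is absent the multiplier
      // defaults to 1.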
      int CBCostMult =
          getStringFnAttrAsInt(
              *CB, InlineConstants::FunctionInlineCostMultiplierAttributeName)
              .value_or(1);

      // Setup the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      InlineResult IR =
          InlineFunction(*CB, IFI, /*MergeAttributes=*/true,
                         &FAM.getResult<AAManager>(*CB->getCaller()));
      if (!IR.isSuccess()) {
        Advice->recordUnsuccessfulInlining(IR);
        continue;
      }

      DidInline = true;
      InlinedCallees.insert(&Callee);
      ++NumInlined;

      LLVM_DEBUG(dbgs() << "    Size after inlining: "
                        << F.getInstructionCount() << "\n");

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
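        // Record this inlining in the history so that call sites cloned from
        // the callee carry a chain we can later consult (via
        // inlineHistoryIncludes) to avoid infinitely re-inlining recursive
        // callees.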
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});

        for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = ICB->getCalledFunction();
          assert(!(NewCallee && NewCallee->isIntrinsic()) &&
                 "Intrinsic calls should not be tracked.");
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(*ICB))
              NewCallee = ICB->getCalledFunction();
          }
          if (NewCallee) {
            if (!NewCallee->isDeclaration()) {
              Calls.push_back({ICB, NewHistoryID});
              // Continually inlining through an SCC can result in huge compile
              // times and bloated code since we arbitrarily stop at some point
              // when the inliner decides it's not profitable to inline anymore.
              // We attempt to mitigate this by making these calls exponentially
              // more expensive.
              // This doesn't apply to calls in the same SCC since if we do
              // inline through the SCC the function will end up being
              // self-recursive which the inliner bails out on, and inlining
              // within an SCC is necessary for performance.
              if (CalleeSCC != C &&
                  CalleeSCC == CG.lookupSCC(CG.get(*NewCallee))) {
                Attribute NewCBCostMult = Attribute::get(
                    M.getContext(),
                    InlineConstants::FunctionInlineCostMultiplierAttributeName,
                    itostr(CBCostMult * IntraSCCCostMultiplier));
                ICB->addFnAttr(NewCBCostMult);
              }
            }
          }
        }
      }

      // For local functions or discardable functions without comdats, check
      // whether this makes the callee trivially dead. In that case, we can drop
      // the body of the function eagerly which may reduce the number of callers
      // of other functions to one, changing inline cost thresholds. Non-local
      // discardable functions with comdats are checked later on.
      bool CalleeWasDeleted = false;
      if (Callee.isDiscardableIfUnused() && Callee.hasZeroLiveUses() &&
          !CG.isLibFunction(Callee)) {
        if (Callee.hasLocalLinkage() || !Callee.hasComdat()) {
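          // Drop any queued calls whose caller is the callee we are about to
          // delete; its body is going away, so they must not be processed.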
          Calls.erase(
              std::remove_if(Calls.begin() + I + 1, Calls.end(),
                             [&](const std::pair<CallBase *, int> &Call) {
                               return Call.first->getCaller() == &Callee;
                             }),
              Calls.end());

          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(!is_contained(DeadFunctions, &Callee) &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
          CalleeWasDeleted = true;
        } else {
          DeadFunctionsInComdats.push_back(&Callee);
        }
      }
      if (CalleeWasDeleted)
        Advice->recordInliningWithCalleeDeleted();
      else
        Advice->recordInlining();
    }

    // Back the call index up by one to put us in a good position to go around
    // the outer loop.
    --I;

    if (!DidInline)
      continue;
    Changed = true;

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.

    // Inside the update, we also update the FunctionAnalysisManager in the
    // proxy for this particular SCC. We do this as the SCC may have changed and
    // as we're going to mutate this particular function we want to make sure
    // the proxy is in place to forward any invalidation events.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForCGSCCPass(CG, *C, N, AM, UR, FAM);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the SCC
    // mutation, we will revisit this function and potentially re-inline. If we
    // do, and that re-inlining also has the potential to mutate the SCC
    // structure, the infinite inlining problem can manifest through infinite
    // SCC splits and merges. To avoid this, we capture the originating caller
    // node and the SCC containing the call edge. This is a slight
    // over-approximation of the possible inlining decisions that must be
    // avoided, but is relatively efficient to store. We use C != OldC to know
    // when a new SCC is generated and the original SCC may be generated via
    // merge in later iterations.
    //
    // It is also possible that, even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged back
    // into itself, in which case the original SCC will be added into
    // UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history, we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();

    // Invalidate analyses for this function now so that we don't have to
    // invalidate analyses for all functions in this SCC later.
    FAM.invalidate(F, PreservedAnalyses::none());
  }

  // We must ensure that we only delete functions with comdats if every function
  // in the comdat is going to be deleted.
  if (!DeadFunctionsInComdats.empty()) {
    filterDeadComdatFunctions(DeadFunctionsInComdats);
    for (auto *Callee : DeadFunctionsInComdats)
      Callee->dropAllReferences();
    DeadFunctions.append(DeadFunctionsInComdats);
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the CGSCC
  // pass manager in the process.
  //
  // Note that this walks a pointer set which has non-deterministic order but
  // that is OK as all we do is delete things and add pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // If the updated SCC was the one containing the deleted function, clear it.
    if (&DeadC == UR.UpdatedC)
      UR.UpdatedC = nullptr;

    // And delete the actual function from the module.
    M.getFunctionList().erase(DeadF);

    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  // We have already invalidated all analyses on modified functions.
  PA.preserveSet<AllAnalysesOn<Function>>();
  return PA;
}

ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
                                                   bool MandatoryFirst,
                                                   InlineContext IC,
                                                   InliningAdvisorMode Mode,
                                                   unsigned MaxDevirtIterations)
    : Params(Params), IC(IC), Mode(Mode),
      MaxDevirtIterations(MaxDevirtIterations) {
  // Run the inliner first. The theory is that we are walking bottom-up and so
  // the callees have already been fully optimized, and we want to inline them
  // into the callers so that our optimizations can reflect that.
  // For PreLinkThinLTO pass, we disable hot-caller heuristic for sample PGO
  // because it makes profile annotation in the backend inaccurate.
  if (MandatoryFirst) {
    PM.addPass(InlinerPass(/*OnlyMandatory*/ true));
    if (EnablePostSCCAdvisorPrinting)
      PM.addPass(InlineAdvisorAnalysisPrinterPass(dbgs()));
  }
  PM.addPass(InlinerPass());
  if (EnablePostSCCAdvisorPrinting)
    PM.addPass(InlineAdvisorAnalysisPrinterPass(dbgs()));
}

PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
                                                ModuleAnalysisManager &MAM) {
  auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
  if (!IAA.tryCreate(Params, Mode,
                     {CGSCCInlineReplayFile,
                      CGSCCInlineReplayScope,
                      CGSCCInlineReplayFallback,
                      {CGSCCInlineReplayFormat}},
                     IC)) {
    M.getContext().emitError(
        "Could not setup Inlining Advisor for the requested "
        "mode and/or options");
    return PreservedAnalyses::all();
  }

  // We wrap the CGSCC pipeline in a devirtualization repeater. This will try
  // to detect when we devirtualize indirect calls and iterate the SCC passes
  // in that case to try and catch knock-on inlining or function attrs
  // opportunities. Then we add it to the module pipeline by walking the SCCs
  // in postorder (or bottom-up).
  // If MaxDevirtIterations is 0, we just don't use the devirtualization
  // wrapper.
  if (MaxDevirtIterations == 0)
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM)));
  else
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations)));

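  // Append the module passes scheduled to run after the CGSCC inlining
  // pipeline, then run the assembled pipeline over the module.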
  MPM.addPass(std::move(AfterCGMPM));
  MPM.run(M, MAM);

  // Discard the InlineAdvisor; a subsequent inlining session should construct
  // its own.
  auto PA = PreservedAnalyses::all();
  if (!KeepAdvisorForPrinting)
    PA.abandon<InlineAdvisorAnalysis>();
  return PA;
}

void InlinerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<InlinerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  if (OnlyMandatory)
    OS << "<only-mandatory>";
}

void ModuleInlinerWrapperPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  // Print some info about passes added to the wrapper. This is however
  // incomplete as the InlineAdvisorAnalysis part isn't included (it also
  // depends on Params and Mode).
  if (!MPM.isEmpty()) {
    MPM.printPipeline(OS, MapClassName2PassName);
    OS << ',';
  }
  OS << "cgscc(";
  if (MaxDevirtIterations != 0)
    OS << "devirt<" << MaxDevirtIterations << ">(";
  PM.printPipeline(OS, MapClassName2PassName);
  if (MaxDevirtIterations != 0)
    OS << ')';
  OS << ')';
}
668